Index: src/mips/code-stubs-mips.cc
diff --git a/src/mips/code-stubs-mips.cc b/src/mips/code-stubs-mips.cc
index 521b8e58f05cecb8ce684b009499a46ee4b22b77..c221bfa3bb9dd1fb24dbbf5f1f0355eae0a325d8 100644
--- a/src/mips/code-stubs-mips.cc
+++ b/src/mips/code-stubs-mips.cc
@@ -615,7 +615,7 @@ void FloatingPointHelper::ConvertIntToDouble(MacroAssembler* masm,
 void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
                                                   Register object,
                                                   Destination destination,
-                                                  FPURegister double_dst,
+                                                  DoubleRegister double_dst,
                                                   Register dst1,
                                                   Register dst2,
                                                   Register heap_number_map,
@@ -651,25 +651,16 @@ void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
     // Load the double value.
     __ ldc1(double_dst, FieldMemOperand(object, HeapNumber::kValueOffset));
 
-    // NOTE: ARM uses a MacroAssembler function here (EmitVFPTruncate).
-    // On MIPS a lot of things cannot be implemented the same way so right
-    // now it makes a lot more sense to just do things manually.
-
-    // Save FCSR.
-    __ cfc1(scratch1, FCSR);
-    // Disable FPU exceptions.
-    __ ctc1(zero_reg, FCSR);
-    __ trunc_w_d(single_scratch, double_dst);
-    // Retrieve FCSR.
-    __ cfc1(scratch2, FCSR);
-    // Restore FCSR.
-    __ ctc1(scratch1, FCSR);
-
-    // Check for inexact conversion or exception.
-    __ And(scratch2, scratch2, kFCSRFlagMask);
+    Register except_flag = scratch2;
+    __ EmitFPUTruncate(kRoundToZero,
+                       single_scratch,
+                       double_dst,
+                       scratch1,
+                       except_flag,
+                       kCheckForInexactConversion);
 
     // Jump to not_int32 if the operation did not succeed.
-    __ Branch(not_int32, ne, scratch2, Operand(zero_reg));
+    __ Branch(not_int32, ne, except_flag, Operand(zero_reg));
 
     if (destination == kCoreRegisters) {
       __ Move(dst1, dst2, double_dst);
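The seven-argument EmitFPUTruncate call above stands in for the hand-rolled FCSR sequence this hunk deletes. A minimal sketch of what the helper presumably does, reconstructed purely from the deleted lines (the real MacroAssembler::EmitFPUTruncate may differ in how it dispatches rounding modes and masks flags):

    // Sketch only, not the verified helper. On return, except_flag holds the
    // masked FCSR exception bits; zero means the truncation was exact.
    void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode,
                                         FPURegister result,
                                         DoubleRegister double_input,
                                         Register scratch1,
                                         Register except_flag,
                                         CheckForInexactConversion check) {
      cfc1(scratch1, FCSR);             // Save FCSR.
      ctc1(zero_reg, FCSR);             // Disable FPU exceptions, clear flags.
      trunc_w_d(result, double_input);  // The kRoundToZero case.
      cfc1(except_flag, FCSR);          // Read back the accrued exception bits.
      ctc1(scratch1, FCSR);             // Restore FCSR.
      // 'check' presumably decides whether the inexact bit stays in the mask.
      And(except_flag, except_flag, kFCSRFlagMask);
    }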
@@ -706,7 +697,7 @@ void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm,
                                             Register scratch1,
                                             Register scratch2,
                                             Register scratch3,
-                                            FPURegister double_scratch,
+                                            DoubleRegister double_scratch,
                                             Label* not_int32) {
   ASSERT(!dst.is(object));
   ASSERT(!scratch1.is(object) && !scratch2.is(object) && !scratch3.is(object));
@@ -735,27 +726,19 @@ void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm,
     // Load the double value.
     __ ldc1(double_scratch, FieldMemOperand(object, HeapNumber::kValueOffset));
 
-    // NOTE: ARM uses a MacroAssembler function here (EmitVFPTruncate).
-    // On MIPS a lot of things cannot be implemented the same way so right
-    // now it makes a lot more sense to just do things manually.
-
-    // Save FCSR.
-    __ cfc1(scratch1, FCSR);
-    // Disable FPU exceptions.
-    __ ctc1(zero_reg, FCSR);
-    __ trunc_w_d(double_scratch, double_scratch);
-    // Retrieve FCSR.
-    __ cfc1(scratch2, FCSR);
-    // Restore FCSR.
-    __ ctc1(scratch1, FCSR);
-
-    // Check for inexact conversion or exception.
-    __ And(scratch2, scratch2, kFCSRFlagMask);
+    FPURegister single_scratch = double_scratch.low();
+    Register except_flag = scratch2;
+    __ EmitFPUTruncate(kRoundToZero,
+                       single_scratch,
+                       double_scratch,
+                       scratch1,
+                       except_flag,
+                       kCheckForInexactConversion);
 
     // Jump to not_int32 if the operation did not succeed.
-    __ Branch(not_int32, ne, scratch2, Operand(zero_reg));
+    __ Branch(not_int32, ne, except_flag, Operand(zero_reg));
 
     // Get the result in the destination register.
-    __ mfc1(dst, double_scratch);
+    __ mfc1(dst, single_scratch);
   } else {
     // Load the double value in the destination registers.
@@ -883,7 +866,7 @@ void FloatingPointHelper::CallCCodeForDoubleOperation(
   }
   // Call C routine that may not cause GC or other trouble.
   __ CallCFunction(ExternalReference::double_fp_operation(op, masm->isolate()),
-                   4);
+                   0, 2);
   // Store answer in the overwritable heap number.
   if (!IsMipsSoftFloatABI) {
     CpuFeatures::Scope scope(FPU);
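The old argument count "4" treated each double as two word-sized arguments; the new two-integer form used throughout this patch counts general-purpose and FPU arguments separately (here 0 and 2), which is what lets doubles travel in f12/f14 under the o32 hard-float ABI instead of being split across a0-a3. A hedged sketch of a call site under the new API, built only from names that appear in this diff:

    __ PrepareCallCFunction(0, 2, t4);     // 0 integer args, 2 double args.
    __ SetCallCDoubleArguments(f12, f14);  // o32 hard-float: doubles in f12/f14.
    __ CallCFunction(
        ExternalReference::double_fp_operation(op, masm->isolate()), 0, 2);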
@@ -1258,7 +1241,7 @@ static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc) {
 
   if (!CpuFeatures::IsSupported(FPU)) {
     __ push(ra);
-    __ PrepareCallCFunction(4, t4);  // Two doubles count as 4 arguments.
+    __ PrepareCallCFunction(0, 2, t4);
     if (!IsMipsSoftFloatABI) {
       // We are not using MIPS FPU instructions, and parameters for the runtime
       // function call are prepaired in a0-a3 registers, but function we are
@@ -1268,19 +1251,15 @@ static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc) {
       __ Move(f12, a0, a1);
       __ Move(f14, a2, a3);
     }
-    __ CallCFunction(ExternalReference::compare_doubles(masm->isolate()), 4);
+    __ CallCFunction(ExternalReference::compare_doubles(masm->isolate()),
+                     0, 2);
     __ pop(ra);  // Because this function returns int, result is in v0.
     __ Ret();
   } else {
     CpuFeatures::Scope scope(FPU);
     Label equal, less_than;
-    __ c(EQ, D, f12, f14);
-    __ bc1t(&equal);
-    __ nop();
-
-    __ c(OLT, D, f12, f14);
-    __ bc1t(&less_than);
-    __ nop();
+    __ BranchF(&equal, NULL, eq, f12, f14);
+    __ BranchF(&less_than, NULL, lt, f12, f14);
 
     // Not equal, not less, not NaN, must be greater.
     __ li(v0, Operand(GREATER));
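BranchF, introduced here, folds the three-instruction compare idiom (coprocessor compare, bc1t, delay-slot nop) into one macro call. Judging from the call sites in this patch, the second argument is the unordered (NaN) target and NULL means "fall through on NaN". Roughly what BranchF(&target, &nan, eq, f12, f14) expands to, reconstructed from the sequences this patch deletes rather than from the macro assembler itself:

    __ c(UN, D, f12, f14);  // Unordered compare: true if either input is NaN.
    __ bc1t(&nan);          // Take the NaN target when one is supplied...
    __ nop();               // ...with the mandatory branch delay slot filled.
    __ c(EQ, D, f12, f14);  // Now the requested condition (eq here).
    __ bc1t(&target);
    __ nop();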
@@ -1473,9 +1452,7 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
     __ JumpIfSmi(probe, not_found);
     __ ldc1(f12, FieldMemOperand(object, HeapNumber::kValueOffset));
     __ ldc1(f14, FieldMemOperand(probe, HeapNumber::kValueOffset));
-    __ c(EQ, D, f12, f14);
-    __ bc1t(&load_result_from_cache);
-    __ nop();   // bc1t() requires explicit fill of branch delay slot.
+    __ BranchF(&load_result_from_cache, NULL, eq, f12, f14);
     __ Branch(not_found);
   } else {
     // Note that there is no cache check for non-FPU case, even though
@@ -1591,9 +1568,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
     __ li(t2, Operand(EQUAL));
 
     // Check if either rhs or lhs is NaN.
-    __ c(UN, D, f12, f14);
-    __ bc1t(&nan);
-    __ nop();
+    __ BranchF(NULL, &nan, eq, f12, f14);
 
     // Check if LESS condition is satisfied. If true, move conditionally
     // result to v0.
@@ -1711,89 +1686,116 @@ void CompareStub::Generate(MacroAssembler* masm) {
 }
 
 
-// The stub returns zero for false, and a non-zero value for true.
+// The stub expects its argument in the tos_ register and returns its result in
+// it, too: zero for false, and a non-zero value for true.
 void ToBooleanStub::Generate(MacroAssembler* masm) {
   // This stub uses FPU instructions.
   CpuFeatures::Scope scope(FPU);
 
-  Label false_result;
-  Label not_heap_number;
-  Register scratch0 = t5.is(tos_) ? t3 : t5;
-
-  // undefined -> false
-  __ LoadRoot(scratch0, Heap::kUndefinedValueRootIndex);
-  __ Branch(&false_result, eq, tos_, Operand(scratch0));
-
-  // Boolean -> its value
-  __ LoadRoot(scratch0, Heap::kFalseValueRootIndex);
-  __ Branch(&false_result, eq, tos_, Operand(scratch0));
-  __ LoadRoot(scratch0, Heap::kTrueValueRootIndex);
-  // "tos_" is a register and contains a non-zero value. Hence we implicitly
-  // return true if the equal condition is satisfied.
-  __ Ret(eq, tos_, Operand(scratch0));
-
-  // Smis: 0 -> false, all other -> true
-  __ And(scratch0, tos_, tos_);
-  __ Branch(&false_result, eq, scratch0, Operand(zero_reg));
-  __ And(scratch0, tos_, Operand(kSmiTagMask));
-  // "tos_" is a register and contains a non-zero value. Hence we implicitly
-  // return true if the not equal condition is satisfied.
-  __ Ret(eq, scratch0, Operand(zero_reg));
-
-  // 'null' -> false
-  __ LoadRoot(scratch0, Heap::kNullValueRootIndex);
-  __ Branch(&false_result, eq, tos_, Operand(scratch0));
-
-  // HeapNumber => false if +0, -0, or NaN.
-  __ lw(scratch0, FieldMemOperand(tos_, HeapObject::kMapOffset));
-  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
-  __ Branch(&not_heap_number, ne, scratch0, Operand(at));
-
-  __ ldc1(f12, FieldMemOperand(tos_, HeapNumber::kValueOffset));
-  __ fcmp(f12, 0.0, UEQ);
-
-  // "tos_" is a register, and contains a non zero value by default.
-  // Hence we only need to overwrite "tos_" with zero to return false for
-  // FP_ZERO or FP_NAN cases. Otherwise, by default it returns true.
-  __ movt(tos_, zero_reg);
-  __ Ret();
+  Label patch;
+  const Register map = t5.is(tos_) ? t3 : t5;
 
-  __ bind(&not_heap_number);
-
-  // It can be an undetectable object.
-  // Undetectable => false.
-  __ lw(at, FieldMemOperand(tos_, HeapObject::kMapOffset));
-  __ lbu(scratch0, FieldMemOperand(at, Map::kBitFieldOffset));
-  __ And(scratch0, scratch0, Operand(1 << Map::kIsUndetectable));
-  __ Branch(&false_result, eq, scratch0, Operand(1 << Map::kIsUndetectable));
-
-  // JavaScript object => true.
-  __ lw(scratch0, FieldMemOperand(tos_, HeapObject::kMapOffset));
-  __ lbu(scratch0, FieldMemOperand(scratch0, Map::kInstanceTypeOffset));
-
-  // "tos_" is a register and contains a non-zero value.
-  // Hence we implicitly return true if the greater than
-  // condition is satisfied.
-  __ Ret(ge, scratch0, Operand(FIRST_SPEC_OBJECT_TYPE));
-
-  // Check for string.
-  __ lw(scratch0, FieldMemOperand(tos_, HeapObject::kMapOffset));
-  __ lbu(scratch0, FieldMemOperand(scratch0, Map::kInstanceTypeOffset));
-  // "tos_" is a register and contains a non-zero value.
-  // Hence we implicitly return true if the greater than
-  // condition is satisfied.
-  __ Ret(ge, scratch0, Operand(FIRST_NONSTRING_TYPE));
-
-  // String value => false iff empty, i.e., length is zero.
-  __ lw(tos_, FieldMemOperand(tos_, String::kLengthOffset));
-  // If length is zero, "tos_" contains zero ==> false.
-  // If length is not zero, "tos_" contains a non-zero value ==> true.
-  __ Ret();
+  // undefined -> false.
+  CheckOddball(masm, UNDEFINED, Heap::kUndefinedValueRootIndex, false);
 
-  // Return 0 in "tos_" for false.
-  __ bind(&false_result);
-  __ mov(tos_, zero_reg);
-  __ Ret();
+  // Boolean -> its value.
+  CheckOddball(masm, BOOLEAN, Heap::kFalseValueRootIndex, false);
+  CheckOddball(masm, BOOLEAN, Heap::kTrueValueRootIndex, true);
+
+  // 'null' -> false.
+  CheckOddball(masm, NULL_TYPE, Heap::kNullValueRootIndex, false);
+
+  if (types_.Contains(SMI)) {
+    // Smis: 0 -> false, all other -> true
+    __ And(at, tos_, kSmiTagMask);
+    // tos_ contains the correct return value already
+    __ Ret(eq, at, Operand(zero_reg));
+  } else if (types_.NeedsMap()) {
+    // If we need a map later and have a Smi -> patch.
+    __ JumpIfSmi(tos_, &patch);
+  }
+
+  if (types_.NeedsMap()) {
+    __ lw(map, FieldMemOperand(tos_, HeapObject::kMapOffset));
+
+    if (types_.CanBeUndetectable()) {
+      __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset));
+      __ And(at, at, Operand(1 << Map::kIsUndetectable));
+      // Undetectable -> false.
+      __ movn(tos_, zero_reg, at);
+      __ Ret(ne, at, Operand(zero_reg));
+    }
+  }
+
+  if (types_.Contains(SPEC_OBJECT)) {
+    // Spec object -> true.
+    __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
+    // tos_ contains the correct non-zero return value already.
+    __ Ret(ge, at, Operand(FIRST_SPEC_OBJECT_TYPE));
+  }
+
+  if (types_.Contains(STRING)) {
+    // String value -> false iff empty.
+    __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
+    Label skip;
+    __ Branch(&skip, ge, at, Operand(FIRST_NONSTRING_TYPE));
+    __ lw(tos_, FieldMemOperand(tos_, String::kLengthOffset));
+    __ Ret();  // the string length is OK as the return value
+    __ bind(&skip);
+  }
+
+  if (types_.Contains(HEAP_NUMBER)) {
+    // Heap number -> false iff +0, -0, or NaN.
+    Label not_heap_number;
+    __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
+    __ Branch(&not_heap_number, ne, map, Operand(at));
+    Label zero_or_nan, number;
+    __ ldc1(f2, FieldMemOperand(tos_, HeapNumber::kValueOffset));
+    __ BranchF(&number, &zero_or_nan, ne, f2, kDoubleRegZero);
+    // "tos_" is a register, and contains a non zero value by default.
+    // Hence we only need to overwrite "tos_" with zero to return false for
+    // FP_ZERO or FP_NAN cases. Otherwise, by default it returns true.
+    __ bind(&zero_or_nan);
+    __ mov(tos_, zero_reg);
+    __ bind(&number);
+    __ Ret();
+    __ bind(&not_heap_number);
+  }
+
+  __ bind(&patch);
+  GenerateTypeTransition(masm);
+}
+
+
+void ToBooleanStub::CheckOddball(MacroAssembler* masm,
+                                 Type type,
+                                 Heap::RootListIndex value,
+                                 bool result) {
+  if (types_.Contains(type)) {
+    // If we see an expected oddball, return its ToBoolean value in tos_.
+    __ LoadRoot(at, value);
+    __ Subu(at, at, tos_);  // This is a check for equality for the movz below.
+    // The value of a root is never NULL, so we can avoid loading a non-null
+    // value into tos_ when we want to return 'true'.
+    if (!result) {
+      __ movz(tos_, zero_reg, at);
+    }
+    __ Ret(eq, at, Operand(zero_reg));
+  }
+}
+
+
+void ToBooleanStub::GenerateTypeTransition(MacroAssembler* masm) {
+  __ Move(a3, tos_);
+  __ li(a2, Operand(Smi::FromInt(tos_.code())));
+  __ li(a1, Operand(Smi::FromInt(types_.ToByte())));
+  __ Push(a3, a2, a1);
+  // Patch the caller to an appropriate specialized stub and return the
+  // operation result to the caller of the stub.
+  __ TailCallExternalReference(
+      ExternalReference(IC_Utility(IC::kToBoolean_Patch), masm->isolate()),
+      3,
+      1);
 }
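The rewritten stub leans on the MIPS conditional moves: movz(rd, rs, rt) writes rs to rd only when rt is zero, and movn only when rt is non-zero. That is why CheckOddball can compute at = root - tos_ and zero the result without a branch. The generated sequence for a false-valued oddball, with register roles exactly as in the code above:

    __ LoadRoot(at, Heap::kFalseValueRootIndex);
    __ Subu(at, at, tos_);              // at == 0 iff tos_ is the oddball.
    __ movz(tos_, zero_reg, at);        // Zero the result only on a match.
    __ Ret(eq, at, Operand(zero_reg));  // Return false on the match...
    // ...otherwise fall through to the next type check.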
@@ -2717,26 +2719,16 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
         // Otherwise return a heap number if allowed, or jump to type
        // transition.
 
-        // NOTE: ARM uses a MacroAssembler function here (EmitVFPTruncate).
-        // On MIPS a lot of things cannot be implemented the same way so right
-        // now it makes a lot more sense to just do things manually.
-
-        // Save FCSR.
-        __ cfc1(scratch1, FCSR);
-        // Disable FPU exceptions.
-        __ ctc1(zero_reg, FCSR);
-        __ trunc_w_d(single_scratch, f10);
-        // Retrieve FCSR.
-        __ cfc1(scratch2, FCSR);
-        // Restore FCSR.
-        __ ctc1(scratch1, FCSR);
-
-        // Check for inexact conversion or exception.
-        __ And(scratch2, scratch2, kFCSRFlagMask);
+        Register except_flag = scratch2;
+        __ EmitFPUTruncate(kRoundToZero,
+                           single_scratch,
+                           f10,
+                           scratch1,
+                           except_flag);
 
         if (result_type_ <= BinaryOpIC::INT32) {
-          // If scratch2 != 0, result does not fit in a 32-bit integer.
-          __ Branch(&transition, ne, scratch2, Operand(zero_reg));
+          // If except_flag != 0, result does not fit in a 32-bit integer.
+          __ Branch(&transition, ne, except_flag, Operand(zero_reg));
         }
 
         // Check if the result fits in a smi.
@@ -3225,7 +3217,6 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
     __ lw(t0, MemOperand(cache_entry, 0));
     __ lw(t1, MemOperand(cache_entry, 4));
     __ lw(t2, MemOperand(cache_entry, 8));
-    __ Addu(cache_entry, cache_entry, 12);
     __ Branch(&calculate, ne, a2, Operand(t0));
     __ Branch(&calculate, ne, a3, Operand(t1));
     // Cache hit. Load result, cleanup and return.
@@ -3259,13 +3250,13 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
     // Register a0 holds precalculated cache entry address; preserve
     // it on the stack and pop it into register cache_entry after the
     // call.
-    __ push(cache_entry);
+    __ Push(cache_entry, a2, a3);
     GenerateCallCFunction(masm, scratch0);
     __ GetCFunctionDoubleResult(f4);
 
     // Try to update the cache. If we cannot allocate a
     // heap number, we return the result without updating.
-    __ pop(cache_entry);
+    __ Pop(cache_entry, a2, a3);
     __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
     __ AllocateHeapNumber(t2, scratch0, scratch1, t1, &no_update);
     __ sdc1(f4, FieldMemOperand(t2, HeapNumber::kValueOffset));
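Besides dropping the premature cache_entry increment, the hunk above now keeps a2/a3 — the two halves of the cache key that are compared against the entry after the call — live across the C call, which is free to clobber argument registers. Push/Pop (capitalized) are multi-register MacroAssembler helpers; assuming they mirror their ARM counterparts, Push(cache_entry, a2, a3) is roughly:

    // One stack adjustment, then stores; first argument at the highest slot.
    Subu(sp, sp, Operand(3 * kPointerSize));
    sw(cache_entry, MemOperand(sp, 2 * kPointerSize));
    sw(a2, MemOperand(sp, 1 * kPointerSize));
    sw(a3, MemOperand(sp, 0 * kPointerSize));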
@@ -3317,22 +3308,25 @@ void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm,
   __ push(ra);
   __ PrepareCallCFunction(2, scratch);
   if (IsMipsSoftFloatABI) {
-    __ Move(v0, v1, f4);
+    __ Move(a0, a1, f4);
   } else {
     __ mov_d(f12, f4);
   }
   switch (type_) {
     case TranscendentalCache::SIN:
       __ CallCFunction(
-          ExternalReference::math_sin_double_function(masm->isolate()), 2);
+          ExternalReference::math_sin_double_function(masm->isolate()),
+          0, 1);
       break;
     case TranscendentalCache::COS:
       __ CallCFunction(
-          ExternalReference::math_cos_double_function(masm->isolate()), 2);
+          ExternalReference::math_cos_double_function(masm->isolate()),
+          0, 1);
       break;
     case TranscendentalCache::LOG:
       __ CallCFunction(
-          ExternalReference::math_log_double_function(masm->isolate()), 2);
+          ExternalReference::math_log_double_function(masm->isolate()),
+          0, 1);
       break;
     default:
       UNIMPLEMENTED();
@@ -3415,10 +3409,11 @@ void MathPowStub::Generate(MacroAssembler* masm) {
                                       heapnumbermap,
                                       &call_runtime);
     __ push(ra);
-    __ PrepareCallCFunction(3, scratch);
+    __ PrepareCallCFunction(1, 1, scratch);
     __ SetCallCDoubleArguments(double_base, exponent);
     __ CallCFunction(
-        ExternalReference::power_double_int_function(masm->isolate()), 3);
+        ExternalReference::power_double_int_function(masm->isolate()),
+        1, 1);
     __ pop(ra);
     __ GetCFunctionDoubleResult(double_result);
     __ sdc1(double_result,
@@ -3443,13 +3438,14 @@ void MathPowStub::Generate(MacroAssembler* masm) {
                                       heapnumbermap,
                                       &call_runtime);
     __ push(ra);
-    __ PrepareCallCFunction(4, scratch);
+    __ PrepareCallCFunction(0, 2, scratch);
     // ABI (o32) for func(double a, double b): a in f12, b in f14.
     ASSERT(double_base.is(f12));
     ASSERT(double_exponent.is(f14));
     __ SetCallCDoubleArguments(double_base, double_exponent);
     __ CallCFunction(
-        ExternalReference::power_double_double_function(masm->isolate()), 4);
+        ExternalReference::power_double_double_function(masm->isolate()),
+        0, 2);
     __ pop(ra);
     __ GetCFunctionDoubleResult(double_result);
     __ sdc1(double_result,
@@ -3493,9 +3489,10 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
   if (do_gc) {
     // Move result passed in v0 into a0 to call PerformGC.
     __ mov(a0, v0);
-    __ PrepareCallCFunction(1, a1);
+    __ PrepareCallCFunction(1, 0, a1);
     __ CallCFunction(
-        ExternalReference::perform_gc_function(masm->isolate()), 1);
+        ExternalReference::perform_gc_function(masm->isolate()),
+        1, 0);
   }
 
   ExternalReference scope_depth =
@@ -3699,8 +3696,11 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
     CpuFeatures::Scope scope(FPU);
     // Save callee-saved FPU registers.
     __ MultiPushFPU(kCalleeSavedFPU);
+    // Set up the reserved register for 0.0.
+    __ Move(kDoubleRegZero, 0.0);
   }
 
+
   // Load argv in s0 register.
   int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize;
   if (CpuFeatures::IsSupported(FPU)) {
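JSEntryStub is the entry path from C++ into generated code, so it is the one place that can establish the invariant that kDoubleRegZero always holds +0.0; the BranchF against kDoubleRegZero in the ToBooleanStub hunk above depends on it. Move(kDoubleRegZero, 0.0) presumably materializes the constant from zero_reg rather than loading it from memory, along the lines of the following (.high() is assumed by analogy with the .low() accessor used earlier in this diff):

    __ mtc1(zero_reg, kDoubleRegZero);         // Low word of the double = 0.
    __ mtc1(zero_reg, kDoubleRegZero.high());  // High word = 0, giving +0.0.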
@@ -3857,11 +3857,10 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
 // * object: a0 or at sp + 1 * kPointerSize.
 // * function: a1 or at sp.
 //
-// Inlined call site patching is a crankshaft-specific feature that is not
-// implemented on MIPS.
+// An inlined call site may have been generated before calling this stub.
+// In this case the offset to the inline site to patch is passed on the stack,
+// in the safepoint slot for register t0.
 void InstanceofStub::Generate(MacroAssembler* masm) {
-  // This is a crankshaft-specific feature that has not been implemented yet.
-  ASSERT(!HasCallSiteInlineCheck());
   // Call site inlining and patching implies arguments in registers.
   ASSERT(HasArgsInRegisters() || !HasCallSiteInlineCheck());
   // ReturnTrueFalse is only implemented for inlined call sites.
@@ -3875,6 +3874,8 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
   const Register inline_site = t5;
   const Register scratch = a2;
 
+  const int32_t kDeltaToLoadBoolResult = 4 * kPointerSize;
+
   Label slow, loop, is_instance, is_not_instance, not_js_object;
 
   if (!HasArgsInRegisters()) {
@@ -3890,10 +3891,10 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
   // real lookup and update the call site cache.
   if (!HasCallSiteInlineCheck()) {
     Label miss;
-    __ LoadRoot(t1, Heap::kInstanceofCacheFunctionRootIndex);
-    __ Branch(&miss, ne, function, Operand(t1));
-    __ LoadRoot(t1, Heap::kInstanceofCacheMapRootIndex);
-    __ Branch(&miss, ne, map, Operand(t1));
+    __ LoadRoot(at, Heap::kInstanceofCacheFunctionRootIndex);
+    __ Branch(&miss, ne, function, Operand(at));
+    __ LoadRoot(at, Heap::kInstanceofCacheMapRootIndex);
+    __ Branch(&miss, ne, map, Operand(at));
     __ LoadRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
     __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
 
@@ -3913,7 +3914,15 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
     __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
     __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
   } else {
-    UNIMPLEMENTED_MIPS();
+    ASSERT(HasArgsInRegisters());
+    // Patch the (relocated) inlined map check.
+
+    // The offset was stored in t0 safepoint slot.
+    // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal)
+    __ LoadFromSafepointRegisterSlot(scratch, t0);
+    __ Subu(inline_site, ra, scratch);
+    // Patch the relocated value to map.
+    __ PatchRelocatedValue(inline_site, scratch, map);
   }
 
   // Register mapping: a3 is object map and t0 is function prototype.
@@ -3939,7 +3948,16 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
     __ mov(v0, zero_reg);
     __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
   } else {
-    UNIMPLEMENTED_MIPS();
+    // Patch the call site to return true.
+    __ LoadRoot(v0, Heap::kTrueValueRootIndex);
+    __ Addu(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
+    // Get the boolean result location in scratch and patch it.
+    __ PatchRelocatedValue(inline_site, scratch, v0);
+
+    if (!ReturnTrueFalseObject()) {
+      ASSERT_EQ(Smi::FromInt(0), 0);
+      __ mov(v0, zero_reg);
+    }
   }
   __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
 
@@ -3948,8 +3966,17 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
     __ li(v0, Operand(Smi::FromInt(1)));
     __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
   } else {
-    UNIMPLEMENTED_MIPS();
+    // Patch the call site to return false.
+    __ LoadRoot(v0, Heap::kFalseValueRootIndex);
+    __ Addu(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
+    // Get the boolean result location in scratch and patch it.
+    __ PatchRelocatedValue(inline_site, scratch, v0);
+
+    if (!ReturnTrueFalseObject()) {
+      __ li(v0, Operand(Smi::FromInt(1)));
+    }
   }
+
   __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
 
   Label object_not_null, object_not_null_or_smi;
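The shape of the code being patched: at an inlined instanceof site, Crankshaft emits a li(reg, <constant>) — on MIPS32 a lui/ori pair carrying a 32-bit immediate — and stores the distance from that pair to the return address in t0's safepoint slot. The stub recovers the site as ra minus that offset, rewrites the embedded map for the check, and later rewrites the boolean answer kDeltaToLoadBoolResult bytes further on. A host-side sketch of the rewriting idea (an assumption for illustration; the real PatchRelocatedValue is emitted as generated code and must also flush the instruction cache):

    // Hypothetical helper, not V8 API: splice a new 32-bit value into a
    // lui/ori pair. site[0] is expected to be 'lui rx, hi16' and site[1]
    // 'ori rx, rx, lo16'; each keeps its opcode bits, only imm16 changes.
    void PatchLuiOriPair(uint32_t* site, uint32_t new_value) {
      site[0] = (site[0] & ~0xffffu) | (new_value >> 16);
      site[1] = (site[1] & ~0xffffu) | (new_value & 0xffffu);
    }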
@@ -6463,39 +6490,25 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
     __ Subu(a2, a0, Operand(kHeapObjectTag));
     __ ldc1(f2, MemOperand(a2, HeapNumber::kValueOffset));
 
-    Label fpu_eq, fpu_lt, fpu_gt;
-    // Compare operands (test if unordered).
-    __ c(UN, D, f0, f2);
-    // Don't base result on status bits when a NaN is involved.
-    __ bc1t(&unordered);
-    __ nop();
-
-    // Test if equal.
-    __ c(EQ, D, f0, f2);
-    __ bc1t(&fpu_eq);
-    __ nop();
+    // Return a result of -1, 0, or 1, or use CompareStub for NaNs.
+    Label fpu_eq, fpu_lt;
+    // Test if equal, and also handle the unordered/NaN case.
+    __ BranchF(&fpu_eq, &unordered, eq, f0, f2);
 
-    // Test if unordered or less (unordered case is already handled).
-    __ c(ULT, D, f0, f2);
-    __ bc1t(&fpu_lt);
-    __ nop();
+    // Test if less (unordered case is already handled).
+    __ BranchF(&fpu_lt, NULL, lt, f0, f2);
 
-    // Otherwise it's greater.
-    __ bc1f(&fpu_gt);
-    __ nop();
+    // Otherwise it's greater, so just fall thru, and return.
+    __ Ret(USE_DELAY_SLOT);
+    __ li(v0, Operand(GREATER));  // In delay slot.
 
-    // Return a result of -1, 0, or 1.
     __ bind(&fpu_eq);
-    __ li(v0, Operand(EQUAL));
-    __ Ret();
+    __ Ret(USE_DELAY_SLOT);
+    __ li(v0, Operand(EQUAL));  // In delay slot.
 
     __ bind(&fpu_lt);
-    __ li(v0, Operand(LESS));
-    __ Ret();
-
-    __ bind(&fpu_gt);
-    __ li(v0, Operand(GREATER));
-    __ Ret();
+    __ Ret(USE_DELAY_SLOT);
+    __ li(v0, Operand(LESS));  // In delay slot.
 
     __ bind(&unordered);
   }
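The Ret(USE_DELAY_SLOT) idiom that replaces the li/Ret pairs exploits the MIPS rule that the instruction after a jump always executes. A bare Ret() emits jr ra plus a padding nop; with the hint, the assembler leaves the slot open and the next emitted instruction lands in it, so each return path shrinks from three instructions to two:

    // Before: three instructions per return path.
    __ li(v0, Operand(GREATER));
    __ Ret();                     // jr ra, then an assembler-inserted nop.

    // After: two instructions; the li executes in the delay slot of jr ra.
    __ Ret(USE_DELAY_SLOT);
    __ li(v0, Operand(GREATER));  // In delay slot.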