| Index: src/mips/stub-cache-mips.cc | 
| diff --git a/src/mips/stub-cache-mips.cc b/src/mips/stub-cache-mips.cc | 
| index c819ce2259158dbce7d626e9fda7986296281abb..c028358c21ee3039a063351f2b79653eacc46f22 100644 | 
| --- a/src/mips/stub-cache-mips.cc | 
| +++ b/src/mips/stub-cache-mips.cc | 
| @@ -1923,194 +1923,6 @@ Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall( | 
| } | 
|  | 
|  | 
| -Handle<Code> CallStubCompiler::CompileMathFloorCall( | 
| -    Handle<Object> object, | 
| -    Handle<JSObject> holder, | 
| -    Handle<Cell> cell, | 
| -    Handle<JSFunction> function, | 
| -    Handle<String> name, | 
| -    Code::StubType type) { | 
| -  const int argc = arguments().immediate(); | 
| -  // If the object is not a JSObject or we got an unexpected number of | 
| -  // arguments, bail out to the regular call. | 
| -  if (!object->IsJSObject() || argc != 1) return Handle<Code>::null(); | 
| - | 
| -  Label miss, slow; | 
| -  HandlerFrontendHeader(object, holder, name, RECEIVER_MAP_CHECK, &miss); | 
| -  if (!cell.is_null()) { | 
| -    ASSERT(cell->value() == *function); | 
| -    GenerateLoadFunctionFromCell(cell, function, &miss); | 
| -  } | 
| - | 
| -  // Load the (only) argument into v0. | 
| -  __ lw(v0, MemOperand(sp, 0 * kPointerSize)); | 
| - | 
| -  // If the argument is a smi, just return. | 
| -  STATIC_ASSERT(kSmiTag == 0); | 
| -  __ SmiTst(v0, t0); | 
| -  __ DropAndRet(argc + 1, eq, t0, Operand(zero_reg)); | 
| - | 
| -  __ CheckMap(v0, a1, Heap::kHeapNumberMapRootIndex, &slow, DONT_DO_SMI_CHECK); | 
| - | 
| -  Label wont_fit_smi, no_fpu_error, restore_fcsr_and_return; | 
| - | 
| -  // If fpu is enabled, we use the floor instruction. | 
| - | 
| -  // Load the HeapNumber value. | 
| -  __ ldc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset)); | 
| - | 
| -  // Backup FCSR. | 
| -  __ cfc1(a3, FCSR); | 
| -  // Clearing FCSR clears the exception mask with no side-effects. | 
| -  __ ctc1(zero_reg, FCSR); | 
| -  // Convert the argument to an integer. | 
| -  __ floor_w_d(f0, f0); | 
| - | 
| -  // Start checking for special cases. | 
| -  // Get the argument exponent and clear the sign bit. | 
| -  __ lw(t1, FieldMemOperand(v0, HeapNumber::kValueOffset + kPointerSize)); | 
| -  __ And(t2, t1, Operand(~HeapNumber::kSignMask)); | 
| -  __ srl(t2, t2, HeapNumber::kMantissaBitsInTopWord); | 
| - | 
| -  // Retrieve FCSR and check for fpu errors. | 
| -  __ cfc1(t5, FCSR); | 
| -  __ And(t5, t5, Operand(kFCSRExceptionFlagMask)); | 
| -  __ Branch(&no_fpu_error, eq, t5, Operand(zero_reg)); | 
| - | 
| -  // Check for NaN, Infinity, and -Infinity. | 
| -  // They are invariant through a Math.Floor call, so just | 
| -  // return the original argument. | 
| -  __ Subu(t3, t2, Operand(HeapNumber::kExponentMask | 
| -        >> HeapNumber::kMantissaBitsInTopWord)); | 
| -  __ Branch(&restore_fcsr_and_return, eq, t3, Operand(zero_reg)); | 
| -  // We had an overflow or underflow in the conversion. Check if we | 
| -  // have a big exponent. | 
| -  // If greater or equal, the argument is already round and in v0. | 
| -  __ Branch(&restore_fcsr_and_return, ge, t3, | 
| -      Operand(HeapNumber::kMantissaBits)); | 
| -  __ Branch(&wont_fit_smi); | 
| - | 
| -  __ bind(&no_fpu_error); | 
| -  // Move the result back to v0. | 
| -  __ mfc1(v0, f0); | 
| -  // Check if the result fits into a smi. | 
| -  __ Addu(a1, v0, Operand(0x40000000)); | 
| -  __ Branch(&wont_fit_smi, lt, a1, Operand(zero_reg)); | 
| -  // Tag the result. | 
| -  STATIC_ASSERT(kSmiTag == 0); | 
| -  __ sll(v0, v0, kSmiTagSize); | 
| - | 
| -  // Check for -0. | 
| -  __ Branch(&restore_fcsr_and_return, ne, v0, Operand(zero_reg)); | 
| -  // t1 already holds the HeapNumber exponent. | 
| -  __ And(t0, t1, Operand(HeapNumber::kSignMask)); | 
| -  // If our HeapNumber is negative it was -0, so load its address and return. | 
| -  // Else v0 is loaded with 0, so we can also just return. | 
| -  __ Branch(&restore_fcsr_and_return, eq, t0, Operand(zero_reg)); | 
| -  __ lw(v0, MemOperand(sp, 0 * kPointerSize)); | 
| - | 
| -  __ bind(&restore_fcsr_and_return); | 
| -  // Restore FCSR and return. | 
| -  __ ctc1(a3, FCSR); | 
| - | 
| -  __ DropAndRet(argc + 1); | 
| - | 
| -  __ bind(&wont_fit_smi); | 
| -  // Restore FCSR and fall to slow case. | 
| -  __ ctc1(a3, FCSR); | 
| - | 
| -  __ bind(&slow); | 
| -  // We do not have to patch the receiver because the function makes no use of | 
| -  // it. | 
| -  GenerateJumpFunctionIgnoreReceiver(function); | 
| - | 
| -  HandlerFrontendFooter(&miss); | 
| - | 
| -  // Return the generated code. | 
| -  return GetCode(type, name); | 
| -} | 
| - | 
| - | 
| -Handle<Code> CallStubCompiler::CompileMathAbsCall( | 
| -    Handle<Object> object, | 
| -    Handle<JSObject> holder, | 
| -    Handle<Cell> cell, | 
| -    Handle<JSFunction> function, | 
| -    Handle<String> name, | 
| -    Code::StubType type) { | 
| -  const int argc = arguments().immediate(); | 
| -  // If the object is not a JSObject or we got an unexpected number of | 
| -  // arguments, bail out to the regular call. | 
| -  if (!object->IsJSObject() || argc != 1) return Handle<Code>::null(); | 
| - | 
| -  Label miss; | 
| - | 
| -  HandlerFrontendHeader(object, holder, name, RECEIVER_MAP_CHECK, &miss); | 
| -  if (!cell.is_null()) { | 
| -    ASSERT(cell->value() == *function); | 
| -    GenerateLoadFunctionFromCell(cell, function, &miss); | 
| -  } | 
| - | 
| -  // Load the (only) argument into v0. | 
| -  __ lw(v0, MemOperand(sp, 0 * kPointerSize)); | 
| - | 
| -  // Check if the argument is a smi. | 
| -  Label not_smi; | 
| -  STATIC_ASSERT(kSmiTag == 0); | 
| -  __ JumpIfNotSmi(v0, &not_smi); | 
| - | 
| -  // Do bitwise not or do nothing depending on the sign of the | 
| -  // argument. | 
| -  __ sra(t0, v0, kBitsPerInt - 1); | 
| -  __ Xor(a1, v0, t0); | 
| - | 
| -  // Add 1 or do nothing depending on the sign of the argument. | 
| -  __ Subu(v0, a1, t0); | 
| - | 
| -  // If the result is still negative, go to the slow case. | 
| -  // This only happens for the most negative smi. | 
| -  Label slow; | 
| -  __ Branch(&slow, lt, v0, Operand(zero_reg)); | 
| - | 
| -  // Smi case done. | 
| -  __ DropAndRet(argc + 1); | 
| - | 
| -  // Check if the argument is a heap number and load its exponent and | 
| -  // sign. | 
| -  __ bind(&not_smi); | 
| -  __ CheckMap(v0, a1, Heap::kHeapNumberMapRootIndex, &slow, DONT_DO_SMI_CHECK); | 
| -  __ lw(a1, FieldMemOperand(v0, HeapNumber::kExponentOffset)); | 
| - | 
| -  // Check the sign of the argument. If the argument is positive, | 
| -  // just return it. | 
| -  Label negative_sign; | 
| -  __ And(t0, a1, Operand(HeapNumber::kSignMask)); | 
| -  __ Branch(&negative_sign, ne, t0, Operand(zero_reg)); | 
| -  __ DropAndRet(argc + 1); | 
| - | 
| -  // If the argument is negative, clear the sign, and return a new | 
| -  // number. | 
| -  __ bind(&negative_sign); | 
| -  __ Xor(a1, a1, Operand(HeapNumber::kSignMask)); | 
| -  __ lw(a3, FieldMemOperand(v0, HeapNumber::kMantissaOffset)); | 
| -  __ LoadRoot(t2, Heap::kHeapNumberMapRootIndex); | 
| -  __ AllocateHeapNumber(v0, t0, t1, t2, &slow); | 
| -  __ sw(a1, FieldMemOperand(v0, HeapNumber::kExponentOffset)); | 
| -  __ sw(a3, FieldMemOperand(v0, HeapNumber::kMantissaOffset)); | 
| -  __ DropAndRet(argc + 1); | 
| - | 
| -  __ bind(&slow); | 
| -  // We do not have to patch the receiver because the function makes no use of | 
| -  // it. | 
| -  GenerateJumpFunctionIgnoreReceiver(function); | 
| - | 
| -  HandlerFrontendFooter(&miss); | 
| - | 
| -  // Return the generated code. | 
| -  return GetCode(type, name); | 
| -} | 
| - | 
| - | 
| Handle<Code> CallStubCompiler::CompileFastApiCall( | 
| const CallOptimization& optimization, | 
| Handle<Object> object, | 
|  |