| Index: src/mips/code-stubs-mips.cc
|
| diff --git a/src/mips/code-stubs-mips.cc b/src/mips/code-stubs-mips.cc
|
| index 2c42001ba7771d7907a6da470cbcdbc448fe3c9b..710901e9c5aecf7dbe225c422dccd080746af318 100644
|
| --- a/src/mips/code-stubs-mips.cc
|
| +++ b/src/mips/code-stubs-mips.cc
|
| @@ -521,290 +521,6 @@ void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
|
| }
|
|
|
|
|
| -void FloatingPointHelper::LoadSmis(MacroAssembler* masm,
|
| - FloatingPointHelper::Destination destination,
|
| - Register scratch1,
|
| - Register scratch2) {
|
| - __ sra(scratch1, a0, kSmiTagSize);
|
| - __ mtc1(scratch1, f14);
|
| - __ cvt_d_w(f14, f14);
|
| - __ sra(scratch1, a1, kSmiTagSize);
|
| - __ mtc1(scratch1, f12);
|
| - __ cvt_d_w(f12, f12);
|
| - if (destination == kCoreRegisters) {
|
| - __ Move(a2, a3, f14);
|
| - __ Move(a0, a1, f12);
|
| - }
|
| -}
|
| -
|
| -
|
| -void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
|
| - Destination destination,
|
| - Register object,
|
| - FPURegister dst,
|
| - Register dst1,
|
| - Register dst2,
|
| - Register heap_number_map,
|
| - Register scratch1,
|
| - Register scratch2,
|
| - Label* not_number) {
|
| - __ AssertRootValue(heap_number_map,
|
| - Heap::kHeapNumberMapRootIndex,
|
| - kHeapNumberMapRegisterClobbered);
|
| -
|
| - Label is_smi, done;
|
| -
|
| - // Smi-check
|
| - __ UntagAndJumpIfSmi(scratch1, object, &is_smi);
|
| - // Heap number check
|
| - __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
|
| -
|
| - // Handle loading a double from a heap number.
|
| - if (destination == kFPURegisters) {
|
| - // Load the double from tagged HeapNumber to double register.
|
| -
|
| - // ARM uses a workaround here because of the unaligned HeapNumber
|
| - // kValueOffset. On MIPS this workaround is built into ldc1 so there's no
|
| - // point in generating even more instructions.
|
| - __ ldc1(dst, FieldMemOperand(object, HeapNumber::kValueOffset));
|
| - } else {
|
| - ASSERT(destination == kCoreRegisters);
|
| - // Load the double from heap number to dst1 and dst2 in double format.
|
| - __ lw(dst1, FieldMemOperand(object, HeapNumber::kValueOffset));
|
| - __ lw(dst2, FieldMemOperand(object,
|
| - HeapNumber::kValueOffset + kPointerSize));
|
| - }
|
| - __ Branch(&done);
|
| -
|
| - // Handle loading a double from a smi.
|
| - __ bind(&is_smi);
|
| - // Convert smi to double using FPU instructions.
|
| - __ mtc1(scratch1, dst);
|
| - __ cvt_d_w(dst, dst);
|
| - if (destination == kCoreRegisters) {
|
| - // Load the converted smi to dst1 and dst2 in double format.
|
| - __ Move(dst1, dst2, dst);
|
| - }
|
| - __ bind(&done);
|
| -}
|
| -
|
| -
|
| -void FloatingPointHelper::ConvertNumberToInt32(MacroAssembler* masm,
|
| - Register object,
|
| - Register dst,
|
| - Register heap_number_map,
|
| - Register scratch1,
|
| - Register scratch2,
|
| - Register scratch3,
|
| - FPURegister double_scratch,
|
| - Label* not_number) {
|
| - __ AssertRootValue(heap_number_map,
|
| - Heap::kHeapNumberMapRootIndex,
|
| - kHeapNumberMapRegisterClobbered);
|
| - Label done;
|
| - Label not_in_int32_range;
|
| -
|
| - __ UntagAndJumpIfSmi(dst, object, &done);
|
| - __ lw(scratch1, FieldMemOperand(object, HeapNumber::kMapOffset));
|
| - __ Branch(not_number, ne, scratch1, Operand(heap_number_map));
|
| - __ ConvertToInt32(object,
|
| - dst,
|
| - scratch1,
|
| - scratch2,
|
| - double_scratch,
|
| -                     &not_in_int32_range);
|
| - __ jmp(&done);
|
| -
|
| - __ bind(&not_in_int32_range);
|
| - __ lw(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
|
| - __ lw(scratch2, FieldMemOperand(object, HeapNumber::kMantissaOffset));
|
| -
|
| - __ EmitOutOfInt32RangeTruncate(dst,
|
| - scratch1,
|
| - scratch2,
|
| - scratch3);
|
| -
|
| - __ bind(&done);
|
| -}
|
| -
|
| -
|
| -void FloatingPointHelper::ConvertIntToDouble(MacroAssembler* masm,
|
| - Register int_scratch,
|
| - Destination destination,
|
| - FPURegister double_dst,
|
| - Register dst_mantissa,
|
| - Register dst_exponent,
|
| - Register scratch2,
|
| - FPURegister single_scratch) {
|
| - ASSERT(!int_scratch.is(scratch2));
|
| - ASSERT(!int_scratch.is(dst_mantissa));
|
| - ASSERT(!int_scratch.is(dst_exponent));
|
| -
|
| - __ mtc1(int_scratch, single_scratch);
|
| - __ cvt_d_w(double_dst, single_scratch);
|
| - if (destination == kCoreRegisters) {
|
| - __ Move(dst_mantissa, dst_exponent, double_dst);
|
| - }
|
| -}
|
| -
|
| -
|
| -void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
|
| - Register object,
|
| - Destination destination,
|
| - DoubleRegister double_dst,
|
| - DoubleRegister double_scratch,
|
| - Register dst_mantissa,
|
| - Register dst_exponent,
|
| - Register heap_number_map,
|
| - Register scratch1,
|
| - Register scratch2,
|
| - FPURegister single_scratch,
|
| - Label* not_int32) {
|
| - ASSERT(!scratch1.is(object) && !scratch2.is(object));
|
| - ASSERT(!scratch1.is(scratch2));
|
| - ASSERT(!heap_number_map.is(object) &&
|
| - !heap_number_map.is(scratch1) &&
|
| - !heap_number_map.is(scratch2));
|
| -
|
| - Label done, obj_is_not_smi;
|
| -
|
| - __ JumpIfNotSmi(object, &obj_is_not_smi);
|
| - __ SmiUntag(scratch1, object);
|
| - ConvertIntToDouble(masm, scratch1, destination, double_dst, dst_mantissa,
|
| - dst_exponent, scratch2, single_scratch);
|
| - __ Branch(&done);
|
| -
|
| - __ bind(&obj_is_not_smi);
|
| - __ AssertRootValue(heap_number_map,
|
| - Heap::kHeapNumberMapRootIndex,
|
| - kHeapNumberMapRegisterClobbered);
|
| - __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);
|
| -
|
| - // Load the number.
|
| - // Load the double value.
|
| - __ ldc1(double_dst, FieldMemOperand(object, HeapNumber::kValueOffset));
|
| -
|
| - Register except_flag = scratch2;
|
| - __ EmitFPUTruncate(kRoundToZero,
|
| - scratch1,
|
| - double_dst,
|
| - at,
|
| - double_scratch,
|
| - except_flag,
|
| - kCheckForInexactConversion);
|
| -
|
| - // Jump to not_int32 if the operation did not succeed.
|
| - __ Branch(not_int32, ne, except_flag, Operand(zero_reg));
|
| - if (destination == kCoreRegisters) {
|
| - __ Move(dst_mantissa, dst_exponent, double_dst);
|
| - }
|
| - __ bind(&done);
|
| -}
|
| -
|
| -
|
| -void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm,
|
| - Register object,
|
| - Register dst,
|
| - Register heap_number_map,
|
| - Register scratch1,
|
| - Register scratch2,
|
| - Register scratch3,
|
| - DoubleRegister double_scratch0,
|
| - DoubleRegister double_scratch1,
|
| - Label* not_int32) {
|
| - ASSERT(!dst.is(object));
|
| - ASSERT(!scratch1.is(object) && !scratch2.is(object) && !scratch3.is(object));
|
| - ASSERT(!scratch1.is(scratch2) &&
|
| - !scratch1.is(scratch3) &&
|
| - !scratch2.is(scratch3));
|
| -
|
| - Label done, maybe_undefined;
|
| -
|
| - __ UntagAndJumpIfSmi(dst, object, &done);
|
| -
|
| - __ AssertRootValue(heap_number_map,
|
| - Heap::kHeapNumberMapRootIndex,
|
| - kHeapNumberMapRegisterClobbered);
|
| -
|
| - __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, &maybe_undefined);
|
| -
|
| - // Object is a heap number.
|
| - // Convert the floating point value to a 32-bit integer.
|
| - // Load the double value.
|
| - __ ldc1(double_scratch0, FieldMemOperand(object, HeapNumber::kValueOffset));
|
| -
|
| - Register except_flag = scratch2;
|
| - __ EmitFPUTruncate(kRoundToZero,
|
| - dst,
|
| - double_scratch0,
|
| - scratch1,
|
| - double_scratch1,
|
| - except_flag,
|
| - kCheckForInexactConversion);
|
| -
|
| - // Jump to not_int32 if the operation did not succeed.
|
| - __ Branch(not_int32, ne, except_flag, Operand(zero_reg));
|
| - __ Branch(&done);
|
| -
|
| - __ bind(&maybe_undefined);
|
| - __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
|
| - __ Branch(not_int32, ne, object, Operand(at));
|
| - // |undefined| is truncated to 0.
|
| - __ li(dst, Operand(Smi::FromInt(0)));
|
| - // Fall through.
|
| -
|
| - __ bind(&done);
|
| -}
|
| -
|
| -
|
| -void FloatingPointHelper::CallCCodeForDoubleOperation(
|
| - MacroAssembler* masm,
|
| - Token::Value op,
|
| - Register heap_number_result,
|
| - Register scratch) {
|
| - // Using core registers:
|
| - // a0: Left value (least significant part of mantissa).
|
| - // a1: Left value (sign, exponent, top of mantissa).
|
| - // a2: Right value (least significant part of mantissa).
|
| - // a3: Right value (sign, exponent, top of mantissa).
|
| -
|
| - // Assert that heap_number_result is saved.
|
| - // We currently always use s0 to pass it.
|
| - ASSERT(heap_number_result.is(s0));
|
| -
|
| - // Push the current return address before the C call.
|
| - __ push(ra);
|
| - __ PrepareCallCFunction(4, scratch); // Two doubles are 4 arguments.
|
| - if (!IsMipsSoftFloatABI) {
|
| - // We are not using MIPS FPU instructions, and parameters for the runtime
|
| - // function call are prepaired in a0-a3 registers, but function we are
|
| - // calling is compiled with hard-float flag and expecting hard float ABI
|
| - // (parameters in f12/f14 registers). We need to copy parameters from
|
| - // a0-a3 registers to f12/f14 register pairs.
|
| - __ Move(f12, a0, a1);
|
| - __ Move(f14, a2, a3);
|
| - }
|
| - {
|
| - AllowExternalCallThatCantCauseGC scope(masm);
|
| - __ CallCFunction(
|
| - ExternalReference::double_fp_operation(op, masm->isolate()), 0, 2);
|
| - }
|
| - // Store answer in the overwritable heap number.
|
| - if (!IsMipsSoftFloatABI) {
|
| - // Double returned in register f0.
|
| - __ sdc1(f0, FieldMemOperand(heap_number_result, HeapNumber::kValueOffset));
|
| - } else {
|
| - // Double returned in registers v0 and v1.
|
| - __ sw(v1, FieldMemOperand(heap_number_result, HeapNumber::kExponentOffset));
|
| - __ sw(v0, FieldMemOperand(heap_number_result, HeapNumber::kMantissaOffset));
|
| - }
|
| - // Place heap_number_result in v0 and return to the pushed return address.
|
| - __ pop(ra);
|
| - __ Ret(USE_DELAY_SLOT);
|
| - __ mov(v0, heap_number_result);
|
| -}
|
| -
|
| -
|
| bool WriteInt32ToHeapNumberStub::IsPregenerated() {
|
| // These variants are compiled ahead of time. See next method.
|
| if (the_int_.is(a1) &&
|
| @@ -1472,6 +1188,42 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
|
| }
|
|
|
|
|
| +// Generates code to call a C function to do a double operation.
|
| +// This code never falls through, but returns with a heap number containing
|
| +// the result in v0.
|
| +// Register heap_number_result must be a heap number in which the
|
| +// result of the operation will be stored.
|
| +// Requires the following layout on entry:
|
| +// a0: Left value (least significant part of mantissa).
|
| +// a1: Left value (sign, exponent, top of mantissa).
|
| +// a2: Right value (least significant part of mantissa).
|
| +// a3: Right value (sign, exponent, top of mantissa).
|
| +static void CallCCodeForDoubleOperation(MacroAssembler* masm,
|
| + Token::Value op,
|
| + Register heap_number_result,
|
| + Register scratch) {
|
| + // Assert that heap_number_result is saved.
|
| + // We currently always use s0 to pass it.
|
| + ASSERT(heap_number_result.is(s0));
|
| +
|
| + // Push the current return address before the C call.
|
| + __ push(ra);
|
| + __ PrepareCallCFunction(4, scratch); // Two doubles are 4 arguments.
|
| + {
|
| + AllowExternalCallThatCantCauseGC scope(masm);
|
| + __ CallCFunction(
|
| + ExternalReference::double_fp_operation(op, masm->isolate()), 0, 2);
|
| + }
|
| + // Store answer in the overwritable heap number.
|
| + // Double returned in register f0.
|
| + __ sdc1(f0, FieldMemOperand(heap_number_result, HeapNumber::kValueOffset));
|
| + // Place heap_number_result in v0 and return to the pushed return address.
|
| + __ pop(ra);
|
| + __ Ret(USE_DELAY_SLOT);
|
| + __ mov(v0, heap_number_result);
|
| +}
|
| +
|
| +
|
| void BinaryOpStub::Initialize() {
|
| platform_specific_bit_ = true; // FPU is a base requirement for V8.
|
| }
|
| @@ -1699,49 +1451,41 @@ void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm,
|
| case Token::MUL:
|
| case Token::DIV:
|
| case Token::MOD: {
|
| - // Load left and right operands into f12 and f14 or a0/a1 and a2/a3
|
| - // depending on operation.
|
| - FloatingPointHelper::Destination destination =
|
| - op != Token::MOD ?
|
| - FloatingPointHelper::kFPURegisters :
|
| - FloatingPointHelper::kCoreRegisters;
|
| -
|
| // Allocate new heap number for result.
|
| Register result = s0;
|
| BinaryOpStub_GenerateHeapResultAllocation(
|
| masm, result, heap_number_map, scratch1, scratch2, gc_required, mode);
|
|
|
| - // Load the operands.
|
| + // Load left and right operands into f12 and f14.
|
| if (smi_operands) {
|
| - FloatingPointHelper::LoadSmis(masm, destination, scratch1, scratch2);
|
| + __ SmiUntag(scratch1, a0);
|
| + __ mtc1(scratch1, f14);
|
| + __ cvt_d_w(f14, f14);
|
| + __ SmiUntag(scratch1, a1);
|
| + __ mtc1(scratch1, f12);
|
| + __ cvt_d_w(f12, f12);
|
| } else {
|
| - // Load right operand to f14 or a2/a3.
|
| + // Load right operand to f14.
|
| if (right_type == BinaryOpIC::INT32) {
|
| - FloatingPointHelper::LoadNumberAsInt32Double(
|
| - masm, right, destination, f14, f16, a2, a3, heap_number_map,
|
| - scratch1, scratch2, f2, miss);
|
| + __ LoadNumberAsInt32Double(
|
| + right, f14, heap_number_map, scratch1, scratch2, f2, miss);
|
| } else {
|
| Label* fail = (right_type == BinaryOpIC::NUMBER) ? miss : not_numbers;
|
| - FloatingPointHelper::LoadNumber(
|
| - masm, destination, right, f14, a2, a3, heap_number_map,
|
| - scratch1, scratch2, fail);
|
| + __ LoadNumber(right, f14, heap_number_map, scratch1, fail);
|
| }
|
| // Load left operand to f12 or a0/a1. This keeps a0/a1 intact if it
|
| // jumps to |miss|.
|
| if (left_type == BinaryOpIC::INT32) {
|
| - FloatingPointHelper::LoadNumberAsInt32Double(
|
| - masm, left, destination, f12, f16, a0, a1, heap_number_map,
|
| - scratch1, scratch2, f2, miss);
|
| + __ LoadNumberAsInt32Double(
|
| + left, f12, heap_number_map, scratch1, scratch2, f2, miss);
|
| } else {
|
| Label* fail = (left_type == BinaryOpIC::NUMBER) ? miss : not_numbers;
|
| - FloatingPointHelper::LoadNumber(
|
| - masm, destination, left, f12, a0, a1, heap_number_map,
|
| - scratch1, scratch2, fail);
|
| + __ LoadNumber(left, f12, heap_number_map, scratch1, fail);
|
| }
|
| }
|
|
|
| // Calculate the result.
|
| - if (destination == FloatingPointHelper::kFPURegisters) {
|
| + if (op != Token::MOD) {
|
| // Using FPU registers:
|
| // f12: Left value.
|
| // f14: Right value.
|
| @@ -1770,10 +1514,7 @@ void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm,
|
| __ mov(v0, result);
|
| } else {
|
| // Call the C function to handle the double operation.
|
| - FloatingPointHelper::CallCCodeForDoubleOperation(masm,
|
| - op,
|
| - result,
|
| - scratch1);
|
| + CallCCodeForDoubleOperation(masm, op, result, scratch1);
|
| if (FLAG_debug_code) {
|
| __ stop("Unreachable code.");
|
| }
|
| @@ -1791,24 +1532,12 @@ void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm,
|
| __ SmiUntag(a2, right);
|
| } else {
|
| // Convert operands to 32-bit integers. Right in a2 and left in a3.
|
| - FloatingPointHelper::ConvertNumberToInt32(masm,
|
| - left,
|
| - a3,
|
| - heap_number_map,
|
| - scratch1,
|
| - scratch2,
|
| - scratch3,
|
| - f0,
|
| - not_numbers);
|
| - FloatingPointHelper::ConvertNumberToInt32(masm,
|
| - right,
|
| - a2,
|
| - heap_number_map,
|
| - scratch1,
|
| - scratch2,
|
| - scratch3,
|
| - f0,
|
| - not_numbers);
|
| + __ ConvertNumberToInt32(
|
| + left, a3, heap_number_map,
|
| + scratch1, scratch2, scratch3, f0, not_numbers);
|
| + __ ConvertNumberToInt32(
|
| + right, a2, heap_number_map,
|
| + scratch1, scratch2, scratch3, f0, not_numbers);
|
| }
|
| Label result_not_a_smi;
|
| switch (op) {
|
| @@ -2042,36 +1771,13 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
|
| // Load both operands and check that they are 32-bit integer.
|
| // Jump to type transition if they are not. The registers a0 and a1 (right
|
| // and left) are preserved for the runtime call.
|
| - FloatingPointHelper::Destination destination = (op_ != Token::MOD)
|
| - ? FloatingPointHelper::kFPURegisters
|
| - : FloatingPointHelper::kCoreRegisters;
|
| -
|
| - FloatingPointHelper::LoadNumberAsInt32Double(masm,
|
| - right,
|
| - destination,
|
| - f14,
|
| - f16,
|
| - a2,
|
| - a3,
|
| - heap_number_map,
|
| - scratch1,
|
| - scratch2,
|
| - f2,
|
| - &transition);
|
| - FloatingPointHelper::LoadNumberAsInt32Double(masm,
|
| - left,
|
| - destination,
|
| - f12,
|
| - f16,
|
| - t0,
|
| - t1,
|
| - heap_number_map,
|
| - scratch1,
|
| - scratch2,
|
| - f2,
|
| - &transition);
|
| -
|
| - if (destination == FloatingPointHelper::kFPURegisters) {
|
| +
|
| + __ LoadNumberAsInt32Double(
|
| + right, f14, heap_number_map, scratch1, scratch2, f2, &transition);
|
| + __ LoadNumberAsInt32Double(
|
| + left, f12, heap_number_map, scratch1, scratch2, f2, &transition);
|
| +
|
| + if (op_ != Token::MOD) {
|
| Label return_heap_number;
|
| switch (op_) {
|
| case Token::ADD:
|
| @@ -2148,10 +1854,6 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
|
| __ BranchF(&transition, NULL, ne, f14, f16);
|
| }
|
|
|
| - // We preserved a0 and a1 to be able to call runtime.
|
| - // Save the left value on the stack.
|
| - __ Push(t1, t0);
|
| -
|
| Label pop_and_call_runtime;
|
|
|
| // Allocate a heap number to store the result.
|
| @@ -2164,12 +1866,8 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
|
| &pop_and_call_runtime,
|
| mode_);
|
|
|
| - // Load the left value from the value saved on the stack.
|
| - __ Pop(a1, a0);
|
| -
|
| // Call the C function to handle the double operation.
|
| - FloatingPointHelper::CallCCodeForDoubleOperation(
|
| - masm, op_, heap_number_result, scratch1);
|
| + CallCCodeForDoubleOperation(masm, op_, heap_number_result, scratch1);
|
| if (FLAG_debug_code) {
|
| __ stop("Unreachable code.");
|
| }
|
| @@ -2189,30 +1887,13 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
|
| case Token::SHR:
|
| case Token::SHL: {
|
| Label return_heap_number;
|
| - Register scratch3 = t1;
|
| // Convert operands to 32-bit integers. Right in a2 and left in a3. The
|
| // registers a0 and a1 (right and left) are preserved for the runtime
|
| // call.
|
| - FloatingPointHelper::LoadNumberAsInt32(masm,
|
| - left,
|
| - a3,
|
| - heap_number_map,
|
| - scratch1,
|
| - scratch2,
|
| - scratch3,
|
| - f0,
|
| - f2,
|
| - &transition);
|
| - FloatingPointHelper::LoadNumberAsInt32(masm,
|
| - right,
|
| - a2,
|
| - heap_number_map,
|
| - scratch1,
|
| - scratch2,
|
| - scratch3,
|
| - f0,
|
| - f2,
|
| - &transition);
|
| + __ LoadNumberAsInt32(
|
| + left, a3, heap_number_map, scratch1, scratch2, f0, f2, &transition);
|
| + __ LoadNumberAsInt32(
|
| + right, a2, heap_number_map, scratch1, scratch2, f0, f2, &transition);
|
|
|
| // The ECMA-262 standard specifies that, for shift operations, only the
|
| // 5 least significant bits of the shift value should be used.
|
| @@ -7097,10 +6778,7 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
|
| // Array literal has ElementsKind of FAST_*_DOUBLE_ELEMENTS.
|
| __ bind(&double_elements);
|
| __ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
|
| - __ StoreNumberToDoubleElements(a0, a3,
|
| - // Overwrites all regs after this.
|
| - t1, t2, t3, t5, a2,
|
| - &slow_elements);
|
| + __ StoreNumberToDoubleElements(a0, a3, t1, t3, t5, a2, &slow_elements);
|
| __ Ret(USE_DELAY_SLOT);
|
| __ mov(v0, a0);
|
| }
|
|
|