Index: src/mips/code-stubs-mips.cc
diff --git a/src/mips/code-stubs-mips.cc b/src/mips/code-stubs-mips.cc
index 5d0dbc467d9934b0ad30d0040752145ea9a5eaf6..2eefc82196f06fc8c51cbf267a9ee2fc090a8a37 100644
--- a/src/mips/code-stubs-mips.cc
+++ b/src/mips/code-stubs-mips.cc
@@ -227,8 +227,20 @@ void InternalArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
 }
 
 
+void UnaryOpStub::InitializeInterfaceDescriptor(
+    Isolate* isolate,
+    CodeStubInterfaceDescriptor* descriptor) {
+  static Register registers[] = { a0 };
+  descriptor->register_param_count_ = 1;
+  descriptor->register_params_ = registers;
+  descriptor->deoptimization_handler_ =
+      FUNCTION_ADDR(UnaryOpIC_Miss);
+}
+
+
 #define __ ACCESS_MASM(masm)
 
+
 static void EmitIdenticalObjectComparison(MacroAssembler* masm,
                                           Label* slow,
                                           Condition cc);
@@ -1586,294 +1598,6 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
 }
 
 
-void UnaryOpStub::PrintName(StringStream* stream) {
-  const char* op_name = Token::Name(op_);
-  const char* overwrite_name = NULL;  // Make g++ happy.
-  switch (mode_) {
-    case UNARY_NO_OVERWRITE: overwrite_name = "Alloc"; break;
-    case UNARY_OVERWRITE: overwrite_name = "Overwrite"; break;
-  }
-  stream->Add("UnaryOpStub_%s_%s_%s",
-              op_name,
-              overwrite_name,
-              UnaryOpIC::GetName(operand_type_));
-}
-
-
-// TODO(svenpanne): Use virtual functions instead of switch.
-void UnaryOpStub::Generate(MacroAssembler* masm) {
-  switch (operand_type_) {
-    case UnaryOpIC::UNINITIALIZED:
-      GenerateTypeTransition(masm);
-      break;
-    case UnaryOpIC::SMI:
-      GenerateSmiStub(masm);
-      break;
-    case UnaryOpIC::NUMBER:
-      GenerateNumberStub(masm);
-      break;
-    case UnaryOpIC::GENERIC:
-      GenerateGenericStub(masm);
-      break;
-  }
-}
-
-
-void UnaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
-  // Argument is in a0 and v0 at this point, so we can overwrite a0.
-  __ li(a2, Operand(Smi::FromInt(op_)));
-  __ li(a1, Operand(Smi::FromInt(mode_)));
-  __ li(a0, Operand(Smi::FromInt(operand_type_)));
-  __ Push(v0, a2, a1, a0);
-
-  __ TailCallExternalReference(
-      ExternalReference(IC_Utility(IC::kUnaryOp_Patch), masm->isolate()), 4, 1);
-}
-
-
-// TODO(svenpanne): Use virtual functions instead of switch.
-void UnaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
-  switch (op_) {
-    case Token::SUB:
-      GenerateSmiStubSub(masm);
-      break;
-    case Token::BIT_NOT:
-      GenerateSmiStubBitNot(masm);
-      break;
-    default:
-      UNREACHABLE();
-  }
-}
-
-
-void UnaryOpStub::GenerateSmiStubSub(MacroAssembler* masm) {
-  Label non_smi, slow;
-  GenerateSmiCodeSub(masm, &non_smi, &slow);
-  __ bind(&non_smi);
-  __ bind(&slow);
-  GenerateTypeTransition(masm);
-}
-
-
-void UnaryOpStub::GenerateSmiStubBitNot(MacroAssembler* masm) {
-  Label non_smi;
-  GenerateSmiCodeBitNot(masm, &non_smi);
-  __ bind(&non_smi);
-  GenerateTypeTransition(masm);
-}
-
-
-void UnaryOpStub::GenerateSmiCodeSub(MacroAssembler* masm,
-                                     Label* non_smi,
-                                     Label* slow) {
-  __ JumpIfNotSmi(a0, non_smi);
-
-  // The result of negating zero or the smallest negative smi is not a smi.
-  __ And(t0, a0, ~0x80000000);
-  __ Branch(slow, eq, t0, Operand(zero_reg));
-
-  // Return '0 - value'.
-  __ Ret(USE_DELAY_SLOT);
-  __ subu(v0, zero_reg, a0);
-}
-
-
-void UnaryOpStub::GenerateSmiCodeBitNot(MacroAssembler* masm,
-                                        Label* non_smi) {
-  __ JumpIfNotSmi(a0, non_smi);
-
-  // Flip bits and revert inverted smi-tag.
-  __ Neg(v0, a0);
-  __ And(v0, v0, ~kSmiTagMask);
-  __ Ret();
-}
-
-
-// TODO(svenpanne): Use virtual functions instead of switch.
-void UnaryOpStub::GenerateNumberStub(MacroAssembler* masm) {
-  switch (op_) {
-    case Token::SUB:
-      GenerateNumberStubSub(masm);
-      break;
-    case Token::BIT_NOT:
-      GenerateNumberStubBitNot(masm);
-      break;
-    default:
-      UNREACHABLE();
-  }
-}
-
-
-void UnaryOpStub::GenerateNumberStubSub(MacroAssembler* masm) {
-  Label non_smi, slow, call_builtin;
-  GenerateSmiCodeSub(masm, &non_smi, &call_builtin);
-  __ bind(&non_smi);
-  GenerateHeapNumberCodeSub(masm, &slow);
-  __ bind(&slow);
-  GenerateTypeTransition(masm);
-  __ bind(&call_builtin);
-  GenerateGenericCodeFallback(masm);
-}
-
-
-void UnaryOpStub::GenerateNumberStubBitNot(MacroAssembler* masm) {
-  Label non_smi, slow;
-  GenerateSmiCodeBitNot(masm, &non_smi);
-  __ bind(&non_smi);
-  GenerateHeapNumberCodeBitNot(masm, &slow);
-  __ bind(&slow);
-  GenerateTypeTransition(masm);
-}
-
-
-void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm,
-                                            Label* slow) {
-  EmitCheckForHeapNumber(masm, a0, a1, t2, slow);
-  // a0 is a heap number.  Get a new heap number in a1.
-  if (mode_ == UNARY_OVERWRITE) {
-    __ lw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
-    __ Xor(a2, a2, Operand(HeapNumber::kSignMask));  // Flip sign.
-    __ Ret(USE_DELAY_SLOT);
-    __ sw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
-  } else {
-    Label slow_allocate_heapnumber, heapnumber_allocated;
-    __ AllocateHeapNumber(a1, a2, a3, t2, &slow_allocate_heapnumber);
-    __ jmp(&heapnumber_allocated);
-
-    __ bind(&slow_allocate_heapnumber);
-    {
-      FrameScope scope(masm, StackFrame::INTERNAL);
-      __ push(a0);
-      __ CallRuntime(Runtime::kNumberAlloc, 0);
-      __ mov(a1, v0);
-      __ pop(a0);
-    }
-
-    __ bind(&heapnumber_allocated);
-    __ lw(a3, FieldMemOperand(a0, HeapNumber::kMantissaOffset));
-    __ lw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
-    __ sw(a3, FieldMemOperand(a1, HeapNumber::kMantissaOffset));
-    __ Xor(a2, a2, Operand(HeapNumber::kSignMask));  // Flip sign.
-    __ sw(a2, FieldMemOperand(a1, HeapNumber::kExponentOffset));
-    __ Ret(USE_DELAY_SLOT);
-    __ mov(v0, a1);
-  }
-}
-
-
-void UnaryOpStub::GenerateHeapNumberCodeBitNot(
-    MacroAssembler* masm,
-    Label* slow) {
-  Label impossible;
-
-  EmitCheckForHeapNumber(masm, a0, a1, t2, slow);
-  // Convert the heap number in a0 to an untagged integer in a1.
-  __ ConvertToInt32(a0, a1, a2, a3, f0, slow);
-
-  // Do the bitwise operation and check if the result fits in a smi.
-  Label try_float;
-  __ Neg(a1, a1);
-  __ Addu(a2, a1, Operand(0x40000000));
-  __ Branch(&try_float, lt, a2, Operand(zero_reg));
-
-  // Tag the result as a smi and we're done.
-  __ Ret(USE_DELAY_SLOT);  // SmiTag emits one instruction in delay slot.
-  __ SmiTag(v0, a1);
-
-  // Try to store the result in a heap number.
-  __ bind(&try_float);
-  if (mode_ == UNARY_NO_OVERWRITE) {
-    Label slow_allocate_heapnumber, heapnumber_allocated;
-    // Allocate a new heap number without zapping v0, which we need if it fails.
-    __ AllocateHeapNumber(a2, a3, t0, t2, &slow_allocate_heapnumber);
-    __ jmp(&heapnumber_allocated);
-
-    __ bind(&slow_allocate_heapnumber);
-    {
-      FrameScope scope(masm, StackFrame::INTERNAL);
-      __ push(v0);  // Push the heap number, not the untagged int32.
-      __ CallRuntime(Runtime::kNumberAlloc, 0);
-      __ mov(a2, v0);  // Move the new heap number into a2.
-      // Get the heap number into v0, now that the new heap number is in a2.
-      __ pop(v0);
-    }
-
-    // Convert the heap number in v0 to an untagged integer in a1.
-    // This can't go slow-case because it's the same number we already
-    // converted once again.
-    __ ConvertToInt32(v0, a1, a3, t0, f0, &impossible);
-    // Negate the result.
-    __ Xor(a1, a1, -1);
-
-    __ bind(&heapnumber_allocated);
-    __ mov(v0, a2);  // Move newly allocated heap number to v0.
-  }
-
-  // Convert the int32 in a1 to the heap number in v0. a2 is corrupted.
-  __ mtc1(a1, f0);
-  __ cvt_d_w(f0, f0);
-  __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
-  __ Ret();
-
-  __ bind(&impossible);
-  if (FLAG_debug_code) {
-    __ stop("Incorrect assumption in bit-not stub");
-  }
-}
-
-
-// TODO(svenpanne): Use virtual functions instead of switch.
-void UnaryOpStub::GenerateGenericStub(MacroAssembler* masm) {
-  switch (op_) {
-    case Token::SUB:
-      GenerateGenericStubSub(masm);
-      break;
-    case Token::BIT_NOT:
-      GenerateGenericStubBitNot(masm);
-      break;
-    default:
-      UNREACHABLE();
-  }
-}
-
-
-void UnaryOpStub::GenerateGenericStubSub(MacroAssembler* masm) {
-  Label non_smi, slow;
-  GenerateSmiCodeSub(masm, &non_smi, &slow);
-  __ bind(&non_smi);
-  GenerateHeapNumberCodeSub(masm, &slow);
-  __ bind(&slow);
-  GenerateGenericCodeFallback(masm);
-}
-
-
-void UnaryOpStub::GenerateGenericStubBitNot(MacroAssembler* masm) {
-  Label non_smi, slow;
-  GenerateSmiCodeBitNot(masm, &non_smi);
-  __ bind(&non_smi);
-  GenerateHeapNumberCodeBitNot(masm, &slow);
-  __ bind(&slow);
-  GenerateGenericCodeFallback(masm);
-}
-
-
-void UnaryOpStub::GenerateGenericCodeFallback(
-    MacroAssembler* masm) {
-  // Handle the slow case by jumping to the JavaScript builtin.
-  __ push(a0);
-  switch (op_) {
-    case Token::SUB:
-      __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
-      break;
-    case Token::BIT_NOT:
-      __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
-      break;
-    default:
-      UNREACHABLE();
-  }
-}
-
-
 void BinaryOpStub::Initialize() {
   platform_specific_bit_ = true;  // FPU is a base requirement for V8.
 }