Chromium Code Reviews

Unified Diff: src/mips/code-stubs-mips.cc

Issue 26236004: Rollback of r17108, r17106, r17104 in trunk branch. (Closed) Base URL: https://v8.googlecode.com/svn/trunk
Patch Set: Created 7 years, 2 months ago
Index: src/mips/code-stubs-mips.cc
diff --git a/src/mips/code-stubs-mips.cc b/src/mips/code-stubs-mips.cc
index 3f59932d15da4b2f67d47aaed8e366d7be3511be..5cdfecc68e357d5e1a06ed8ebd040f9c69621763 100644
--- a/src/mips/code-stubs-mips.cc
+++ b/src/mips/code-stubs-mips.cc
@@ -1227,18 +1227,958 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
}
-void BinaryOpStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- static Register registers[] = { a1, a0 };
- descriptor->register_param_count_ = 2;
- descriptor->register_params_ = registers;
- descriptor->deoptimization_handler_ = FUNCTION_ADDR(BinaryOpIC_Miss);
- descriptor->SetMissHandler(
- ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate));
+// Generates code to call a C function to do a double operation.
+// This code never falls through, but returns with a heap number containing
+// the result in v0.
+// Register heap_number_result must be a heap number in which the
+// result of the operation will be stored.
+// Requires the following layout on entry:
+// a0: Left value (least significant part of mantissa).
+// a1: Left value (sign, exponent, top of mantissa).
+// a2: Right value (least significant part of mantissa).
+// a3: Right value (sign, exponent, top of mantissa).
+static void CallCCodeForDoubleOperation(MacroAssembler* masm,
+ Token::Value op,
+ Register heap_number_result,
+ Register scratch) {
+  // Assert that heap_number_result is callee-saved (it must survive the
+  // C call below). We currently always use s0 to pass it.
+ ASSERT(heap_number_result.is(s0));
+
+ // Push the current return address before the C call.
+ __ push(ra);
+ __ PrepareCallCFunction(4, scratch); // Two doubles are 4 arguments.
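+  // The two doubles arrive in a0/a1 and a2/a3 (see the layout above); under
+  // the MIPS O32 ABI these are exactly the four argument words just
+  // reserved by PrepareCallCFunction.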
+ {
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ CallCFunction(
+ ExternalReference::double_fp_operation(op, masm->isolate()), 0, 2);
+ }
+ // Store answer in the overwritable heap number.
+ // Double returned in register f0.
+ __ sdc1(f0, FieldMemOperand(heap_number_result, HeapNumber::kValueOffset));
+ // Place heap_number_result in v0 and return to the pushed return address.
+ __ pop(ra);
+ __ Ret(USE_DELAY_SLOT);
+ __ mov(v0, heap_number_result);
}
+void BinaryOpStub::Initialize() {
+ platform_specific_bit_ = true; // FPU is a base requirement for V8.
+}
+
+
+void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
+ Label get_result;
+
+ __ Push(a1, a0);
+
+ __ li(a2, Operand(Smi::FromInt(MinorKey())));
+ __ push(a2);
+
+ __ TailCallExternalReference(
+ ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
+ masm->isolate()),
+ 3,
+ 1);
+}
+
+
+void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(
+ MacroAssembler* masm) {
+ UNIMPLEMENTED();
+}
+
+
+void BinaryOpStub_GenerateSmiSmiOperation(MacroAssembler* masm,
+ Token::Value op) {
+ Register left = a1;
+ Register right = a0;
+
+ Register scratch1 = t0;
+ Register scratch2 = t1;
+
+ ASSERT(right.is(a0));
+ STATIC_ASSERT(kSmiTag == 0);
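+  // On 32-bit V8 a smi stores its value shifted left by one with a zero
+  // tag bit (e.g. 3 is encoded as 6), so the ADD and SUB cases below can
+  // operate directly on the tagged operands.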
+
+ Label not_smi_result;
+ switch (op) {
+ case Token::ADD:
+ __ AdduAndCheckForOverflow(v0, left, right, scratch1);
+ __ RetOnNoOverflow(scratch1);
+ // No need to revert anything - right and left are intact.
+ break;
+ case Token::SUB:
+ __ SubuAndCheckForOverflow(v0, left, right, scratch1);
+ __ RetOnNoOverflow(scratch1);
+ // No need to revert anything - right and left are intact.
+ break;
+ case Token::MUL: {
+ // Remove tag from one of the operands. This way the multiplication result
+ // will be a smi if it fits the smi range.
+ __ SmiUntag(scratch1, right);
+ // Do multiplication.
+ // lo = lower 32 bits of scratch1 * left.
+ // hi = higher 32 bits of scratch1 * left.
+ __ Mult(left, scratch1);
+ // Check for overflowing the smi range - no overflow if higher 33 bits of
+ // the result are identical.
+ __ mflo(scratch1);
+ __ mfhi(scratch2);
+ __ sra(scratch1, scratch1, 31);
+ __ Branch(&not_smi_result, ne, scratch1, Operand(scratch2));
+ // Go slow on zero result to handle -0.
+ __ mflo(v0);
+ __ Ret(ne, v0, Operand(zero_reg));
+      // We need to return -0 if we multiplied a negative number by 0.
+      // We know one of the operands was zero.
+ __ Addu(scratch2, right, left);
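+      // One operand was zero, so scratch2 now equals the non-zero operand
+      // and its sign picks the result: -3 * 0 must yield -0, while 0 * 3
+      // yields the smi 0.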
+ Label skip;
+ // ARM uses the 'pl' condition, which is 'ge'.
+ // Negating it results in 'lt'.
+ __ Branch(&skip, lt, scratch2, Operand(zero_reg));
+ ASSERT(Smi::FromInt(0) == 0);
+ __ Ret(USE_DELAY_SLOT);
+ __ mov(v0, zero_reg); // Return smi 0 if the non-zero one was positive.
+ __ bind(&skip);
+ // We fall through here if we multiplied a negative number with 0, because
+ // that would mean we should produce -0.
+ }
+ break;
+ case Token::DIV: {
+ Label done;
+ __ SmiUntag(scratch2, right);
+ __ SmiUntag(scratch1, left);
+ __ Div(scratch1, scratch2);
+ // A minor optimization: div may be calculated asynchronously, so we check
+ // for division by zero before getting the result.
+ __ Branch(&not_smi_result, eq, scratch2, Operand(zero_reg));
+      // If the result is 0, we need to make sure the divisor (right) is
+      // positive, otherwise it is a -0 case.
+ // Quotient is in 'lo', remainder is in 'hi'.
+ // Check for no remainder first.
+ __ mfhi(scratch1);
+ __ Branch(&not_smi_result, ne, scratch1, Operand(zero_reg));
+ __ mflo(scratch1);
+ __ Branch(&done, ne, scratch1, Operand(zero_reg));
+ __ Branch(&not_smi_result, lt, scratch2, Operand(zero_reg));
+ __ bind(&done);
+ // Check that the signed result fits in a Smi.
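+      // Adding 0x40000000 maps the smi range [-0x40000000, 0x3fffffff]
+      // onto [0, 0x7fffffff]; anything outside wraps to a negative sum
+      // (e.g. 0x40000000 + 0x40000000 == 0x80000000, which is negative).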
+ __ Addu(scratch2, scratch1, Operand(0x40000000));
+ __ Branch(&not_smi_result, lt, scratch2, Operand(zero_reg));
+ __ Ret(USE_DELAY_SLOT); // SmiTag emits one instruction in delay slot.
+ __ SmiTag(v0, scratch1);
+ }
+ break;
+ case Token::MOD: {
+ Label done;
+ __ SmiUntag(scratch2, right);
+ __ SmiUntag(scratch1, left);
+ __ Div(scratch1, scratch2);
+ // A minor optimization: div may be calculated asynchronously, so we check
+ // for division by 0 before calling mfhi.
+ // Check for zero on the right hand side.
+ __ Branch(&not_smi_result, eq, scratch2, Operand(zero_reg));
+ // If the result is 0, we need to make sure the dividend (left) is
+ // positive (or 0), otherwise it is a -0 case.
+ // Remainder is in 'hi'.
+ __ mfhi(scratch2);
+ __ Branch(&done, ne, scratch2, Operand(zero_reg));
+ __ Branch(&not_smi_result, lt, scratch1, Operand(zero_reg));
+ __ bind(&done);
+ // Check that the signed result fits in a Smi.
+ __ Addu(scratch1, scratch2, Operand(0x40000000));
+ __ Branch(&not_smi_result, lt, scratch1, Operand(zero_reg));
+ __ Ret(USE_DELAY_SLOT); // SmiTag emits one instruction in delay slot.
+ __ SmiTag(v0, scratch2);
+ }
+ break;
+ case Token::BIT_OR:
+ __ Ret(USE_DELAY_SLOT);
+ __ or_(v0, left, right);
+ break;
+ case Token::BIT_AND:
+ __ Ret(USE_DELAY_SLOT);
+ __ and_(v0, left, right);
+ break;
+ case Token::BIT_XOR:
+ __ Ret(USE_DELAY_SLOT);
+ __ xor_(v0, left, right);
+ break;
+ case Token::SAR:
+ // Remove tags from right operand.
+ __ GetLeastBitsFromSmi(scratch1, right, 5);
+ __ srav(scratch1, left, scratch1);
+ // Smi tag result.
+ __ And(v0, scratch1, ~kSmiTagMask);
+ __ Ret();
+ break;
+ case Token::SHR:
+      // Remove tags from operands. We can't do this on a 31-bit number
+      // because then the 0s get shifted into bit 30 instead of bit 31.
+ __ SmiUntag(scratch1, left);
+ __ GetLeastBitsFromSmi(scratch2, right, 5);
+ __ srlv(v0, scratch1, scratch2);
+ // Unsigned shift is not allowed to produce a negative number, so
+ // check the sign bit and the sign bit after Smi tagging.
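+      // The mask 0xc0000000 covers bit 31 (the unsigned result would read
+      // back as negative) and bit 30 (smi tagging would shift the bit into
+      // the sign position); e.g. a result of 0x40000000 cannot be tagged.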
+ __ And(scratch1, v0, Operand(0xc0000000));
+ __ Branch(&not_smi_result, ne, scratch1, Operand(zero_reg));
+ // Smi tag result.
+ __ Ret(USE_DELAY_SLOT); // SmiTag emits one instruction in delay slot.
+ __ SmiTag(v0);
+ break;
+ case Token::SHL:
+ // Remove tags from operands.
+ __ SmiUntag(scratch1, left);
+ __ GetLeastBitsFromSmi(scratch2, right, 5);
+ __ sllv(scratch1, scratch1, scratch2);
+ // Check that the signed result fits in a Smi.
+ __ Addu(scratch2, scratch1, Operand(0x40000000));
+ __ Branch(&not_smi_result, lt, scratch2, Operand(zero_reg));
+ __ Ret(USE_DELAY_SLOT);
+ __ SmiTag(v0, scratch1); // SmiTag emits one instruction in delay slot.
+ break;
+ default:
+ UNREACHABLE();
+ }
+ __ bind(&not_smi_result);
+}
+
+
+void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
+ Register result,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required,
+ OverwriteMode mode);
+
+
+void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm,
+ BinaryOpIC::TypeInfo left_type,
+ BinaryOpIC::TypeInfo right_type,
+ bool smi_operands,
+ Label* not_numbers,
+ Label* gc_required,
+ Label* miss,
+ Token::Value op,
+ OverwriteMode mode) {
+ Register left = a1;
+ Register right = a0;
+ Register scratch1 = t3;
+ Register scratch2 = t5;
+
+ ASSERT(smi_operands || (not_numbers != NULL));
+ if (smi_operands) {
+ __ AssertSmi(left);
+ __ AssertSmi(right);
+ }
+ if (left_type == BinaryOpIC::SMI) {
+ __ JumpIfNotSmi(left, miss);
+ }
+ if (right_type == BinaryOpIC::SMI) {
+ __ JumpIfNotSmi(right, miss);
+ }
+
+ Register heap_number_map = t2;
+ __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+
+ switch (op) {
+ case Token::ADD:
+ case Token::SUB:
+ case Token::MUL:
+ case Token::DIV:
+ case Token::MOD: {
+ // Allocate new heap number for result.
+ Register result = s0;
+ BinaryOpStub_GenerateHeapResultAllocation(
+ masm, result, heap_number_map, scratch1, scratch2, gc_required, mode);
+
+ // Load left and right operands into f12 and f14.
+ if (smi_operands) {
+ __ SmiUntag(scratch1, a0);
+ __ mtc1(scratch1, f14);
+ __ cvt_d_w(f14, f14);
+ __ SmiUntag(scratch1, a1);
+ __ mtc1(scratch1, f12);
+ __ cvt_d_w(f12, f12);
+ } else {
+ // Load right operand to f14.
+ if (right_type == BinaryOpIC::INT32) {
+ __ LoadNumberAsInt32Double(
+ right, f14, heap_number_map, scratch1, scratch2, f2, miss);
+ } else {
+ Label* fail = (right_type == BinaryOpIC::NUMBER) ? miss : not_numbers;
+ __ LoadNumber(right, f14, heap_number_map, scratch1, fail);
+ }
+ // Load left operand to f12 or a0/a1. This keeps a0/a1 intact if it
+ // jumps to |miss|.
+ if (left_type == BinaryOpIC::INT32) {
+ __ LoadNumberAsInt32Double(
+ left, f12, heap_number_map, scratch1, scratch2, f2, miss);
+ } else {
+ Label* fail = (left_type == BinaryOpIC::NUMBER) ? miss : not_numbers;
+ __ LoadNumber(left, f12, heap_number_map, scratch1, fail);
+ }
+ }
+
+ // Calculate the result.
+ if (op != Token::MOD) {
+ // Using FPU registers:
+ // f12: Left value.
+ // f14: Right value.
+ switch (op) {
+ case Token::ADD:
+ __ add_d(f10, f12, f14);
+ break;
+ case Token::SUB:
+ __ sub_d(f10, f12, f14);
+ break;
+ case Token::MUL:
+ __ mul_d(f10, f12, f14);
+ break;
+ case Token::DIV:
+ __ div_d(f10, f12, f14);
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ // ARM uses a workaround here because of the unaligned HeapNumber
+ // kValueOffset. On MIPS this workaround is built into sdc1 so
+ // there's no point in generating even more instructions.
+ __ sdc1(f10, FieldMemOperand(result, HeapNumber::kValueOffset));
+ __ Ret(USE_DELAY_SLOT);
+ __ mov(v0, result);
+ } else {
+ // Call the C function to handle the double operation.
+ CallCCodeForDoubleOperation(masm, op, result, scratch1);
+ if (FLAG_debug_code) {
+ __ stop("Unreachable code.");
+ }
+ }
+ break;
+ }
+ case Token::BIT_OR:
+ case Token::BIT_XOR:
+ case Token::BIT_AND:
+ case Token::SAR:
+ case Token::SHR:
+ case Token::SHL: {
+ if (smi_operands) {
+ __ SmiUntag(a3, left);
+ __ SmiUntag(a2, right);
+ } else {
+ // Convert operands to 32-bit integers. Right in a2 and left in a3.
+ __ TruncateNumberToI(left, a3, heap_number_map, scratch1, not_numbers);
+ __ TruncateNumberToI(right, a2, heap_number_map, scratch1, not_numbers);
+ }
+ Label result_not_a_smi;
+ switch (op) {
+ case Token::BIT_OR:
+ __ Or(a2, a3, Operand(a2));
+ break;
+ case Token::BIT_XOR:
+ __ Xor(a2, a3, Operand(a2));
+ break;
+ case Token::BIT_AND:
+ __ And(a2, a3, Operand(a2));
+ break;
+ case Token::SAR:
+ // Use only the 5 least significant bits of the shift count.
+ __ GetLeastBitsFromInt32(a2, a2, 5);
+ __ srav(a2, a3, a2);
+ break;
+ case Token::SHR:
+ // Use only the 5 least significant bits of the shift count.
+ __ GetLeastBitsFromInt32(a2, a2, 5);
+ __ srlv(a2, a3, a2);
+          // SHR is special because it is required to produce a positive
+          // answer. A negative value here means the unsigned result does
+          // not fit in a signed int32; the heap number path below handles
+          // this case by writing the value as an unsigned int.
+ __ Branch(&result_not_a_smi, lt, a2, Operand(zero_reg));
+ break;
+ case Token::SHL:
+ // Use only the 5 least significant bits of the shift count.
+ __ GetLeastBitsFromInt32(a2, a2, 5);
+ __ sllv(a2, a3, a2);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ // Check that the *signed* result fits in a smi.
+ __ Addu(a3, a2, Operand(0x40000000));
+ __ Branch(&result_not_a_smi, lt, a3, Operand(zero_reg));
+ __ Ret(USE_DELAY_SLOT); // SmiTag emits one instruction in delay slot.
+ __ SmiTag(v0, a2);
+
+ // Allocate new heap number for result.
+ __ bind(&result_not_a_smi);
+ Register result = t1;
+ if (smi_operands) {
+ __ AllocateHeapNumber(
+ result, scratch1, scratch2, heap_number_map, gc_required);
+ } else {
+ BinaryOpStub_GenerateHeapResultAllocation(
+ masm, result, heap_number_map, scratch1, scratch2, gc_required,
+ mode);
+ }
+
+ // a2: Answer as signed int32.
+ // t1: Heap number to write answer into.
+
+ // Nothing can go wrong now, so move the heap number to v0, which is the
+ // result.
+ __ mov(v0, t1);
+      // Convert the int32 in a2 to a double and store it in the heap number
+      // in v0. As mentioned above, SHR must always produce a positive
+      // result.
+ __ mtc1(a2, f0);
+ if (op == Token::SHR) {
+ __ Cvt_d_uw(f0, f0, f22);
+ } else {
+ __ cvt_d_w(f0, f0);
+ }
+ // ARM uses a workaround here because of the unaligned HeapNumber
+ // kValueOffset. On MIPS this workaround is built into sdc1 so
+ // there's no point in generating even more instructions.
+ __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
+ __ Ret();
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+// Generate the smi code. If the operation on smis is successful, a return is
+// generated. If the result is not a smi and heap number allocation is not
+// requested, the code falls through. If heap number allocation is requested
+// but a heap number cannot be allocated, the code jumps to the label
+// gc_required.
+void BinaryOpStub_GenerateSmiCode(
+ MacroAssembler* masm,
+ Label* use_runtime,
+ Label* gc_required,
+ Token::Value op,
+ BinaryOpStub::SmiCodeGenerateHeapNumberResults allow_heapnumber_results,
+ OverwriteMode mode) {
+ Label not_smis;
+
+ Register left = a1;
+ Register right = a0;
+ Register scratch1 = t3;
+
+ // Perform combined smi check on both operands.
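+  // The bitwise OR of two tagged values has a zero tag bit only when both
+  // operands do, so a single JumpIfNotSmi tests left and right at once.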
+ __ Or(scratch1, left, Operand(right));
+ STATIC_ASSERT(kSmiTag == 0);
+ __ JumpIfNotSmi(scratch1, &not_smis);
+
+  // If the smi-smi operation results in a smi, a return is generated.
+ BinaryOpStub_GenerateSmiSmiOperation(masm, op);
+
+  // If heap number results are possible, generate the result in an allocated
+  // heap number.
+ if (allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS) {
+ BinaryOpStub_GenerateFPOperation(
+ masm, BinaryOpIC::UNINITIALIZED, BinaryOpIC::UNINITIALIZED, true,
+ use_runtime, gc_required, &not_smis, op, mode);
+ }
+ __ bind(&not_smis);
+}
+
+
+void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
+ Label right_arg_changed, call_runtime;
+
+ if (op_ == Token::MOD && encoded_right_arg_.has_value) {
+ // It is guaranteed that the value will fit into a Smi, because if it
+ // didn't, we wouldn't be here, see BinaryOp_Patch.
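+    // This stub was specialized for a constant right operand (for example
+    // x % 4); if the actual right operand differs, transition again.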
+ __ Branch(&right_arg_changed,
+ ne,
+ a0,
+ Operand(Smi::FromInt(fixed_right_arg_value())));
+ }
+
+ if (result_type_ == BinaryOpIC::UNINITIALIZED ||
+ result_type_ == BinaryOpIC::SMI) {
+ // Only allow smi results.
+ BinaryOpStub_GenerateSmiCode(
+ masm, &call_runtime, NULL, op_, NO_HEAPNUMBER_RESULTS, mode_);
+ } else {
+ // Allow heap number result and don't make a transition if a heap number
+ // cannot be allocated.
+ BinaryOpStub_GenerateSmiCode(
+ masm, &call_runtime, &call_runtime, op_, ALLOW_HEAPNUMBER_RESULTS,
+ mode_);
+ }
+
+ // Code falls through if the result is not returned as either a smi or heap
+ // number.
+ __ bind(&right_arg_changed);
+ GenerateTypeTransition(masm);
+
+ __ bind(&call_runtime);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ GenerateRegisterArgsPush(masm);
+ GenerateCallRuntime(masm);
+ }
+ __ Ret();
+}
+
+
+void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
+ Label call_runtime;
+ ASSERT(left_type_ == BinaryOpIC::STRING && right_type_ == BinaryOpIC::STRING);
+ ASSERT(op_ == Token::ADD);
+ // If both arguments are strings, call the string add stub.
+ // Otherwise, do a transition.
+
+ // Registers containing left and right operands respectively.
+ Register left = a1;
+ Register right = a0;
+
+ // Test if left operand is a string.
+ __ JumpIfSmi(left, &call_runtime);
+ __ GetObjectType(left, a2, a2);
+ __ Branch(&call_runtime, ge, a2, Operand(FIRST_NONSTRING_TYPE));
+
+ // Test if right operand is a string.
+ __ JumpIfSmi(right, &call_runtime);
+ __ GetObjectType(right, a2, a2);
+ __ Branch(&call_runtime, ge, a2, Operand(FIRST_NONSTRING_TYPE));
+
+ StringAddStub string_add_stub(
+ (StringAddFlags)(STRING_ADD_CHECK_NONE | STRING_ADD_ERECT_FRAME));
+ GenerateRegisterArgsPush(masm);
+ __ TailCallStub(&string_add_stub);
+
+ __ bind(&call_runtime);
+ GenerateTypeTransition(masm);
+}
+
+
+void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
+ ASSERT(Max(left_type_, right_type_) == BinaryOpIC::INT32);
+
+ Register left = a1;
+ Register right = a0;
+ Register scratch1 = t3;
+ Register scratch2 = t5;
+ FPURegister double_scratch = f0;
+ FPURegister single_scratch = f6;
+
+ Register heap_number_result = no_reg;
+ Register heap_number_map = t2;
+ __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+
+ Label call_runtime;
+  // Labels for type transition, used for wrong input or output types.
+  // Both labels are currently bound to the same position. We use two
+  // different labels to differentiate the causes leading to a type
+  // transition.
+ Label transition;
+
+ // Smi-smi fast case.
+ Label skip;
+ __ Or(scratch1, left, right);
+ __ JumpIfNotSmi(scratch1, &skip);
+ BinaryOpStub_GenerateSmiSmiOperation(masm, op_);
+ // Fall through if the result is not a smi.
+ __ bind(&skip);
+
+ switch (op_) {
+ case Token::ADD:
+ case Token::SUB:
+ case Token::MUL:
+ case Token::DIV:
+ case Token::MOD: {
+ // It could be that only SMIs have been seen at either the left
+ // or the right operand. For precise type feedback, patch the IC
+ // again if this changes.
+ if (left_type_ == BinaryOpIC::SMI) {
+ __ JumpIfNotSmi(left, &transition);
+ }
+ if (right_type_ == BinaryOpIC::SMI) {
+ __ JumpIfNotSmi(right, &transition);
+ }
+      // Load both operands and check that they are 32-bit integers.
+      // Jump to type transition if they are not. The registers a0 and a1
+      // (right and left) are preserved for the runtime call.
+
+ __ LoadNumberAsInt32Double(
+ right, f14, heap_number_map, scratch1, scratch2, f2, &transition);
+ __ LoadNumberAsInt32Double(
+ left, f12, heap_number_map, scratch1, scratch2, f2, &transition);
+
+ if (op_ != Token::MOD) {
+ Label return_heap_number;
+ switch (op_) {
+ case Token::ADD:
+ __ add_d(f10, f12, f14);
+ break;
+ case Token::SUB:
+ __ sub_d(f10, f12, f14);
+ break;
+ case Token::MUL:
+ __ mul_d(f10, f12, f14);
+ break;
+ case Token::DIV:
+ __ div_d(f10, f12, f14);
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ if (result_type_ <= BinaryOpIC::INT32) {
+ Register except_flag = scratch2;
+ const FPURoundingMode kRoundingMode = op_ == Token::DIV ?
+ kRoundToMinusInf : kRoundToZero;
+ const CheckForInexactConversion kConversion = op_ == Token::DIV ?
+ kCheckForInexactConversion : kDontCheckForInexactConversion;
+ __ EmitFPUTruncate(kRoundingMode,
+ scratch1,
+ f10,
+ at,
+ f16,
+ except_flag,
+ kConversion);
+ // If except_flag != 0, result does not fit in a 32-bit integer.
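+          // For DIV the conversion must also be exact: e.g. 7 / 2 == 3.5
+          // has no int32 representation, so the inexact check above forces
+          // a transition for it.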
+ __ Branch(&transition, ne, except_flag, Operand(zero_reg));
+ // Try to tag the result as a Smi, return heap number on overflow.
+ __ SmiTagCheckOverflow(scratch1, scratch1, scratch2);
+ __ Branch(&return_heap_number, lt, scratch2, Operand(zero_reg));
+ // Check for minus zero, transition in that case (because we need
+ // to return a heap number).
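+          // -0.0 truncates to the integer 0, and its sign bit lives in the
+          // high word of the double, i.e. in f11 of the f10/f11 register
+          // pair read below.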
+ Label not_zero;
+ ASSERT(kSmiTag == 0);
+ __ Branch(&not_zero, ne, scratch1, Operand(zero_reg));
+ __ mfc1(scratch2, f11);
+ __ And(scratch2, scratch2, HeapNumber::kSignMask);
+ __ Branch(&transition, ne, scratch2, Operand(zero_reg));
+ __ bind(&not_zero);
+
+ __ Ret(USE_DELAY_SLOT);
+ __ mov(v0, scratch1);
+ }
+
+ __ bind(&return_heap_number);
+ // Return a heap number, or fall through to type transition or runtime
+ // call if we can't.
+ // We are using FPU registers so s0 is available.
+ heap_number_result = s0;
+ BinaryOpStub_GenerateHeapResultAllocation(masm,
+ heap_number_result,
+ heap_number_map,
+ scratch1,
+ scratch2,
+ &call_runtime,
+ mode_);
+ __ sdc1(f10,
+ FieldMemOperand(heap_number_result, HeapNumber::kValueOffset));
+ __ Ret(USE_DELAY_SLOT);
+ __ mov(v0, heap_number_result);
+
+ // A DIV operation expecting an integer result falls through
+ // to type transition.
+
+ } else {
+ if (encoded_right_arg_.has_value) {
+ __ Move(f16, fixed_right_arg_value());
+ __ BranchF(&transition, NULL, ne, f14, f16);
+ }
+
+ Label pop_and_call_runtime;
+
+ // Allocate a heap number to store the result.
+ heap_number_result = s0;
+ BinaryOpStub_GenerateHeapResultAllocation(masm,
+ heap_number_result,
+ heap_number_map,
+ scratch1,
+ scratch2,
+ &pop_and_call_runtime,
+ mode_);
+
+ // Call the C function to handle the double operation.
+ CallCCodeForDoubleOperation(masm, op_, heap_number_result, scratch1);
+ if (FLAG_debug_code) {
+ __ stop("Unreachable code.");
+ }
+
+ __ bind(&pop_and_call_runtime);
+ __ Drop(2);
+ __ Branch(&call_runtime);
+ }
+
+ break;
+ }
+
+ case Token::BIT_OR:
+ case Token::BIT_XOR:
+ case Token::BIT_AND:
+ case Token::SAR:
+ case Token::SHR:
+ case Token::SHL: {
+ Label return_heap_number;
+ // Convert operands to 32-bit integers. Right in a2 and left in a3. The
+ // registers a0 and a1 (right and left) are preserved for the runtime
+ // call.
+ __ LoadNumberAsInt32(
+ left, a3, heap_number_map, scratch1, scratch2, f0, f2, &transition);
+ __ LoadNumberAsInt32(
+ right, a2, heap_number_map, scratch1, scratch2, f0, f2, &transition);
+
+ // The ECMA-262 standard specifies that, for shift operations, only the
+ // 5 least significant bits of the shift value should be used.
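+      // For example, 1 << 33 evaluates as 1 << (33 & 0x1f) == 1 << 1 == 2.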
+ switch (op_) {
+ case Token::BIT_OR:
+ __ Or(a2, a3, Operand(a2));
+ break;
+ case Token::BIT_XOR:
+ __ Xor(a2, a3, Operand(a2));
+ break;
+ case Token::BIT_AND:
+ __ And(a2, a3, Operand(a2));
+ break;
+ case Token::SAR:
+ __ And(a2, a2, Operand(0x1f));
+ __ srav(a2, a3, a2);
+ break;
+ case Token::SHR:
+ __ And(a2, a2, Operand(0x1f));
+ __ srlv(a2, a3, a2);
+ // SHR is special because it is required to produce a positive answer.
+ // We only get a negative result if the shift value (a2) is 0.
+        // This result cannot be represented as a signed 32-bit integer, so
+        // try to return a heap number if we can.
+ __ Branch((result_type_ <= BinaryOpIC::INT32)
+ ? &transition
+ : &return_heap_number,
+ lt,
+ a2,
+ Operand(zero_reg));
+ break;
+ case Token::SHL:
+ __ And(a2, a2, Operand(0x1f));
+ __ sllv(a2, a3, a2);
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ // Check if the result fits in a smi.
+ __ Addu(scratch1, a2, Operand(0x40000000));
+      // If not, try to return a heap number. (We know the result is an
+      // int32.)
+ __ Branch(&return_heap_number, lt, scratch1, Operand(zero_reg));
+ // Tag the result and return.
+ __ Ret(USE_DELAY_SLOT); // SmiTag emits one instruction in delay slot.
+ __ SmiTag(v0, a2);
+
+ __ bind(&return_heap_number);
+ heap_number_result = t1;
+ BinaryOpStub_GenerateHeapResultAllocation(masm,
+ heap_number_result,
+ heap_number_map,
+ scratch1,
+ scratch2,
+ &call_runtime,
+ mode_);
+
+ if (op_ != Token::SHR) {
+ // Convert the result to a floating point value.
+ __ mtc1(a2, double_scratch);
+ __ cvt_d_w(double_scratch, double_scratch);
+ } else {
+ // The result must be interpreted as an unsigned 32-bit integer.
+ __ mtc1(a2, double_scratch);
+ __ Cvt_d_uw(double_scratch, double_scratch, single_scratch);
+ }
+
+ // Store the result.
+ __ sdc1(double_scratch,
+ FieldMemOperand(heap_number_result, HeapNumber::kValueOffset));
+ __ Ret(USE_DELAY_SLOT);
+ __ mov(v0, heap_number_result);
+
+ break;
+ }
+
+ default:
+ UNREACHABLE();
+ }
+
+ // We never expect DIV to yield an integer result, so we always generate
+ // type transition code for DIV operations expecting an integer result: the
+ // code will fall through to this type transition.
+ if (transition.is_linked() ||
+ ((op_ == Token::DIV) && (result_type_ <= BinaryOpIC::INT32))) {
+ __ bind(&transition);
+ GenerateTypeTransition(masm);
+ }
+
+ __ bind(&call_runtime);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ GenerateRegisterArgsPush(masm);
+ GenerateCallRuntime(masm);
+ }
+ __ Ret();
+}
+
+
+void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
+ Label call_runtime;
+
+ if (op_ == Token::ADD) {
+ // Handle string addition here, because it is the only operation
+ // that does not do a ToNumber conversion on the operands.
+ GenerateAddStrings(masm);
+ }
+
+ // Convert oddball arguments to numbers.
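+  // Undefined converts to NaN for arithmetic but to 0 for bitwise ops:
+  // undefined + 1 is NaN, while undefined | 1 is 1.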
+ Label check, done;
+ __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
+ __ Branch(&check, ne, a1, Operand(t0));
+ if (Token::IsBitOp(op_)) {
+ __ li(a1, Operand(Smi::FromInt(0)));
+ } else {
+ __ LoadRoot(a1, Heap::kNanValueRootIndex);
+ }
+ __ jmp(&done);
+ __ bind(&check);
+ __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
+ __ Branch(&done, ne, a0, Operand(t0));
+ if (Token::IsBitOp(op_)) {
+ __ li(a0, Operand(Smi::FromInt(0)));
+ } else {
+ __ LoadRoot(a0, Heap::kNanValueRootIndex);
+ }
+ __ bind(&done);
+
+ GenerateNumberStub(masm);
+}
+
+
+void BinaryOpStub::GenerateNumberStub(MacroAssembler* masm) {
+ Label call_runtime, transition;
+ BinaryOpStub_GenerateFPOperation(
+ masm, left_type_, right_type_, false,
+ &transition, &call_runtime, &transition, op_, mode_);
+
+ __ bind(&transition);
+ GenerateTypeTransition(masm);
+
+ __ bind(&call_runtime);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ GenerateRegisterArgsPush(masm);
+ GenerateCallRuntime(masm);
+ }
+ __ Ret();
+}
+
+
+void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
+ Label call_runtime, call_string_add_or_runtime, transition;
+
+ BinaryOpStub_GenerateSmiCode(
+ masm, &call_runtime, &call_runtime, op_, ALLOW_HEAPNUMBER_RESULTS, mode_);
+
+ BinaryOpStub_GenerateFPOperation(
+ masm, left_type_, right_type_, false,
+ &call_string_add_or_runtime, &call_runtime, &transition, op_, mode_);
+
+ __ bind(&transition);
+ GenerateTypeTransition(masm);
+
+ __ bind(&call_string_add_or_runtime);
+ if (op_ == Token::ADD) {
+ GenerateAddStrings(masm);
+ }
+
+ __ bind(&call_runtime);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ GenerateRegisterArgsPush(masm);
+ GenerateCallRuntime(masm);
+ }
+ __ Ret();
+}
+
+
+void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
+ ASSERT(op_ == Token::ADD);
+ Label left_not_string, call_runtime;
+
+ Register left = a1;
+ Register right = a0;
+
+ // Check if left argument is a string.
+ __ JumpIfSmi(left, &left_not_string);
+ __ GetObjectType(left, a2, a2);
+ __ Branch(&left_not_string, ge, a2, Operand(FIRST_NONSTRING_TYPE));
+
+ StringAddStub string_add_left_stub(
+ (StringAddFlags)(STRING_ADD_CHECK_RIGHT | STRING_ADD_ERECT_FRAME));
+ GenerateRegisterArgsPush(masm);
+ __ TailCallStub(&string_add_left_stub);
+
+ // Left operand is not a string, test right.
+ __ bind(&left_not_string);
+ __ JumpIfSmi(right, &call_runtime);
+ __ GetObjectType(right, a2, a2);
+ __ Branch(&call_runtime, ge, a2, Operand(FIRST_NONSTRING_TYPE));
+
+ StringAddStub string_add_right_stub(
+ (StringAddFlags)(STRING_ADD_CHECK_LEFT | STRING_ADD_ERECT_FRAME));
+ GenerateRegisterArgsPush(masm);
+ __ TailCallStub(&string_add_right_stub);
+
+ // At least one argument is not a string.
+ __ bind(&call_runtime);
+}
+
+
+void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
+ Register result,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required,
+ OverwriteMode mode) {
+  // The code below will clobber result if allocation fails. To keep both
+  // arguments intact for the runtime call, result cannot be one of them.
+ ASSERT(!result.is(a0) && !result.is(a1));
+
+ if (mode == OVERWRITE_LEFT || mode == OVERWRITE_RIGHT) {
+ Label skip_allocation, allocated;
+ Register overwritable_operand = mode == OVERWRITE_LEFT ? a1 : a0;
+ // If the overwritable operand is already an object, we skip the
+ // allocation of a heap number.
+ __ JumpIfNotSmi(overwritable_operand, &skip_allocation);
+ // Allocate a heap number for the result.
+ __ AllocateHeapNumber(
+ result, scratch1, scratch2, heap_number_map, gc_required);
+ __ Branch(&allocated);
+ __ bind(&skip_allocation);
+ // Use object holding the overwritable operand for result.
+ __ mov(result, overwritable_operand);
+ __ bind(&allocated);
+ } else {
+ ASSERT(mode == NO_OVERWRITE);
+ __ AllocateHeapNumber(
+ result, scratch1, scratch2, heap_number_map, gc_required);
+ }
+}
+
+
+void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
+ __ Push(a1, a0);
+}
+
+
void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
// Untagged case: double input in f4, double result goes
// into f4.
@@ -1708,7 +2648,6 @@ void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate);
ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
- BinaryOpStub::GenerateAheadOfTime(isolate);
}