Chromium Code Reviews

Unified Diff: src/arm/codegen-arm.cc

Issue 2843049: Simplify the transitions in the Binary Op ICs. Now a single call... (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: Created 10 years, 5 months ago
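Summary of the change below: GenerateTypeTransition now ends in a tail call that both patches the IC and computes the answer, so once a type transition has been emitted for ADD, SUB, MUL or DIV the stub no longer needs to fall through into the floating-point code. The patch threads a generate_code_to_calculate_answer flag through the stub and binds the later labels (do_the_call, slow, slow_reverse) only when they are still reachable, that is, when the flag is set or the label is_linked(). A minimal standalone sketch of that emission pattern, using toy Label and Assembler types rather than the real V8 MacroAssembler:

#include <cstdio>
#include <vector>

// Toy model of an assembler label: "linked" once some jump targets it.
struct Label {
  std::vector<int> jump_sites;
  bool is_linked() const { return !jump_sites.empty(); }
};

struct Assembler {
  int pc = 0;                                // next instruction index
  void jump(Label* l) { l->jump_sites.push_back(pc++); }
  void bind(Label* l) { (void)l; }           // label now resolves to pc
  void emit() { pc++; }                      // any ordinary instruction
};

int main() {
  Assembler masm;
  Label slow;
  // After a type-transition tail call nothing below is reachable by
  // fall-through, so stop generating the answer-calculating code.
  bool generate_code_to_calculate_answer = false;
  if (generate_code_to_calculate_answer) {
    masm.emit();
    masm.jump(&slow);                        // this would link the label
  }
  // Bind &slow only if it can be reached; otherwise the slow path would
  // be emitted as dead code after the tail call.
  if (generate_code_to_calculate_answer || slow.is_linked()) {
    masm.bind(&slow);
    masm.emit();                             // slow-path code
  }
  std::printf("emitted %d instructions\n", masm.pc);  // prints 0 here
  return 0;
}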
Index: src/arm/codegen-arm.cc
===================================================================
--- src/arm/codegen-arm.cc (revision 5022)
+++ src/arm/codegen-arm.cc (working copy)
@@ -7648,8 +7648,10 @@
__ Swap(r0, r1, ip);
}
+ // The type transition also calculates the answer.
+ bool generate_code_to_calculate_answer = true;
+
if (ShouldGenerateFPCode()) {
- Label r0_is_smi, r1_is_smi, finished_loading_r0, finished_loading_r1;
if (runtime_operands_type_ == BinaryOpIC::DEFAULT) {
switch (op_) {
@@ -7657,247 +7659,252 @@
case Token::SUB:
case Token::MUL:
case Token::DIV:
- GenerateTypeTransition(masm);
+ GenerateTypeTransition(masm); // Tail call.
+ generate_code_to_calculate_answer = false;
break;
default:
break;
}
- // Restore heap number map register.
- __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
}
- if (mode_ == NO_OVERWRITE) {
- // In the case where there is no chance of an overwritable float we may as
- // well do the allocation immediately while r0 and r1 are untouched.
- __ AllocateHeapNumber(r5, r3, r7, heap_number_map, &slow);
- }
+ if (generate_code_to_calculate_answer) {
+ Label r0_is_smi, r1_is_smi, finished_loading_r0, finished_loading_r1;
+ if (mode_ == NO_OVERWRITE) {
+ // In the case where there is no chance of an overwritable float we may as
+ // well do the allocation immediately while r0 and r1 are untouched.
+ __ AllocateHeapNumber(r5, r3, r7, heap_number_map, &slow);
+ }
- // Move r0 to a double in r2-r3.
- __ tst(r0, Operand(kSmiTagMask));
- __ b(eq, &r0_is_smi); // It's a Smi so don't check it's a heap number.
- __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- __ cmp(r4, heap_number_map);
- __ b(ne, &slow);
- if (mode_ == OVERWRITE_RIGHT) {
- __ mov(r5, Operand(r0)); // Overwrite this heap number.
- }
- if (use_fp_registers) {
- CpuFeatures::Scope scope(VFP3);
- // Load the double from tagged HeapNumber r0 to d7.
- __ sub(r7, r0, Operand(kHeapObjectTag));
- __ vldr(d7, r7, HeapNumber::kValueOffset);
- } else {
- // Calling convention says that second double is in r2 and r3.
- __ Ldrd(r2, r3, FieldMemOperand(r0, HeapNumber::kValueOffset));
- }
- __ jmp(&finished_loading_r0);
- __ bind(&r0_is_smi);
- if (mode_ == OVERWRITE_RIGHT) {
- // We can't overwrite a Smi so get address of new heap number into r5.
- __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow);
- }
+ // Move r0 to a double in r2-r3.
+ __ tst(r0, Operand(kSmiTagMask));
+ __ b(eq, &r0_is_smi); // It's a Smi, so don't check whether it's a heap number.
+ __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+ __ cmp(r4, heap_number_map);
+ __ b(ne, &slow);
+ if (mode_ == OVERWRITE_RIGHT) {
+ __ mov(r5, Operand(r0)); // Overwrite this heap number.
+ }
+ if (use_fp_registers) {
+ CpuFeatures::Scope scope(VFP3);
+ // Load the double from tagged HeapNumber r0 to d7.
+ __ sub(r7, r0, Operand(kHeapObjectTag));
+ __ vldr(d7, r7, HeapNumber::kValueOffset);
+ } else {
+ // Calling convention says that second double is in r2 and r3.
+ __ Ldrd(r2, r3, FieldMemOperand(r0, HeapNumber::kValueOffset));
+ }
+ __ jmp(&finished_loading_r0);
+ __ bind(&r0_is_smi);
+ if (mode_ == OVERWRITE_RIGHT) {
+ // We can't overwrite a Smi so get address of new heap number into r5.
+ __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow);
+ }
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
- // Convert smi in r0 to double in d7.
- __ mov(r7, Operand(r0, ASR, kSmiTagSize));
- __ vmov(s15, r7);
- __ vcvt_f64_s32(d7, s15);
- if (!use_fp_registers) {
- __ vmov(r2, r3, d7);
+ if (CpuFeatures::IsSupported(VFP3)) {
+ CpuFeatures::Scope scope(VFP3);
+ // Convert smi in r0 to double in d7.
+ __ mov(r7, Operand(r0, ASR, kSmiTagSize));
+ __ vmov(s15, r7);
+ __ vcvt_f64_s32(d7, s15);
+ if (!use_fp_registers) {
+ __ vmov(r2, r3, d7);
+ }
+ } else {
+ // Write Smi from r0 to r3 and r2 in double format.
+ __ mov(r7, Operand(r0));
+ ConvertToDoubleStub stub3(r3, r2, r7, r4);
+ __ push(lr);
+ __ Call(stub3.GetCode(), RelocInfo::CODE_TARGET);
+ __ pop(lr);
}
- } else {
- // Write Smi from r0 to r3 and r2 in double format.
- __ mov(r7, Operand(r0));
- ConvertToDoubleStub stub3(r3, r2, r7, r4);
- __ push(lr);
- __ Call(stub3.GetCode(), RelocInfo::CODE_TARGET);
- __ pop(lr);
- }
- // HEAP_NUMBERS stub is slower than GENERIC on a pair of smis.
- // r0 is known to be a smi. If r1 is also a smi then switch to GENERIC.
- Label r1_is_not_smi;
- if (runtime_operands_type_ == BinaryOpIC::HEAP_NUMBERS) {
- __ tst(r1, Operand(kSmiTagMask));
- __ b(ne, &r1_is_not_smi);
- GenerateTypeTransition(masm);
- // Restore heap number map register.
- __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- __ jmp(&r1_is_smi);
- }
+ // HEAP_NUMBERS stub is slower than GENERIC on a pair of smis.
+ // r0 is known to be a smi. If r1 is also a smi then switch to GENERIC.
+ Label r1_is_not_smi;
+ if (runtime_operands_type_ == BinaryOpIC::HEAP_NUMBERS) {
+ __ tst(r1, Operand(kSmiTagMask));
+ __ b(ne, &r1_is_not_smi);
+ GenerateTypeTransition(masm); // Tail call.
+ }
- __ bind(&finished_loading_r0);
+ __ bind(&finished_loading_r0);
- // Move r1 to a double in r0-r1.
- __ tst(r1, Operand(kSmiTagMask));
- __ b(eq, &r1_is_smi); // It's a Smi so don't check it's a heap number.
- __ bind(&r1_is_not_smi);
- __ ldr(r4, FieldMemOperand(r1, HeapNumber::kMapOffset));
- __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- __ cmp(r4, heap_number_map);
- __ b(ne, &slow);
- if (mode_ == OVERWRITE_LEFT) {
- __ mov(r5, Operand(r1)); // Overwrite this heap number.
- }
- if (use_fp_registers) {
- CpuFeatures::Scope scope(VFP3);
- // Load the double from tagged HeapNumber r1 to d6.
- __ sub(r7, r1, Operand(kHeapObjectTag));
- __ vldr(d6, r7, HeapNumber::kValueOffset);
- } else {
- // Calling convention says that first double is in r0 and r1.
- __ Ldrd(r0, r1, FieldMemOperand(r1, HeapNumber::kValueOffset));
- }
- __ jmp(&finished_loading_r1);
- __ bind(&r1_is_smi);
- if (mode_ == OVERWRITE_LEFT) {
- // We can't overwrite a Smi so get address of new heap number into r5.
- __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow);
- }
+ // Move r1 to a double in r0-r1.
+ __ tst(r1, Operand(kSmiTagMask));
+ __ b(eq, &r1_is_smi); // It's a Smi, so don't check whether it's a heap number.
+ __ bind(&r1_is_not_smi);
+ __ ldr(r4, FieldMemOperand(r1, HeapNumber::kMapOffset));
+ __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+ __ cmp(r4, heap_number_map);
+ __ b(ne, &slow);
+ if (mode_ == OVERWRITE_LEFT) {
+ __ mov(r5, Operand(r1)); // Overwrite this heap number.
+ }
+ if (use_fp_registers) {
+ CpuFeatures::Scope scope(VFP3);
+ // Load the double from tagged HeapNumber r1 to d6.
+ __ sub(r7, r1, Operand(kHeapObjectTag));
+ __ vldr(d6, r7, HeapNumber::kValueOffset);
+ } else {
+ // Calling convention says that first double is in r0 and r1.
+ __ Ldrd(r0, r1, FieldMemOperand(r1, HeapNumber::kValueOffset));
+ }
+ __ jmp(&finished_loading_r1);
+ __ bind(&r1_is_smi);
+ if (mode_ == OVERWRITE_LEFT) {
+ // We can't overwrite a Smi so get address of new heap number into r5.
+ __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow);
+ }
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
- // Convert smi in r1 to double in d6.
- __ mov(r7, Operand(r1, ASR, kSmiTagSize));
- __ vmov(s13, r7);
- __ vcvt_f64_s32(d6, s13);
- if (!use_fp_registers) {
- __ vmov(r0, r1, d6);
+ if (CpuFeatures::IsSupported(VFP3)) {
+ CpuFeatures::Scope scope(VFP3);
+ // Convert smi in r1 to double in d6.
+ __ mov(r7, Operand(r1, ASR, kSmiTagSize));
+ __ vmov(s13, r7);
+ __ vcvt_f64_s32(d6, s13);
+ if (!use_fp_registers) {
+ __ vmov(r0, r1, d6);
+ }
+ } else {
+ // Write Smi from r1 to r1 and r0 in double format.
+ __ mov(r7, Operand(r1));
+ ConvertToDoubleStub stub4(r1, r0, r7, r9);
+ __ push(lr);
+ __ Call(stub4.GetCode(), RelocInfo::CODE_TARGET);
+ __ pop(lr);
}
- } else {
- // Write Smi from r1 to r1 and r0 in double format.
- __ mov(r7, Operand(r1));
- ConvertToDoubleStub stub4(r1, r0, r7, r9);
- __ push(lr);
- __ Call(stub4.GetCode(), RelocInfo::CODE_TARGET);
- __ pop(lr);
+
+ __ bind(&finished_loading_r1);
}
- __ bind(&finished_loading_r1);
+ if (generate_code_to_calculate_answer || do_the_call.is_linked()) {
+ __ bind(&do_the_call);
+ // If we are inlining the operation using VFP3 instructions for
+ // add, subtract, multiply, or divide, the arguments are in d6 and d7.
+ if (use_fp_registers) {
+ CpuFeatures::Scope scope(VFP3);
+ // ARMv7 VFP3 instructions to implement
+ // double-precision add, subtract, multiply, and divide.
- __ bind(&do_the_call);
- // If we are inlining the operation using VFP3 instructions for
- // add, subtract, multiply, or divide, the arguments are in d6 and d7.
- if (use_fp_registers) {
- CpuFeatures::Scope scope(VFP3);
- // ARMv7 VFP3 instructions to implement
- // double precision, add, subtract, multiply, divide.
-
- if (Token::MUL == op_) {
- __ vmul(d5, d6, d7);
- } else if (Token::DIV == op_) {
- __ vdiv(d5, d6, d7);
- } else if (Token::ADD == op_) {
- __ vadd(d5, d6, d7);
- } else if (Token::SUB == op_) {
- __ vsub(d5, d6, d7);
+ if (Token::MUL == op_) {
+ __ vmul(d5, d6, d7);
+ } else if (Token::DIV == op_) {
+ __ vdiv(d5, d6, d7);
+ } else if (Token::ADD == op_) {
+ __ vadd(d5, d6, d7);
+ } else if (Token::SUB == op_) {
+ __ vsub(d5, d6, d7);
+ } else {
+ UNREACHABLE();
+ }
+ __ sub(r0, r5, Operand(kHeapObjectTag));
+ __ vstr(d5, r0, HeapNumber::kValueOffset);
+ __ add(r0, r0, Operand(kHeapObjectTag));
+ __ mov(pc, lr);
} else {
- UNREACHABLE();
- }
- __ sub(r0, r5, Operand(kHeapObjectTag));
- __ vstr(d5, r0, HeapNumber::kValueOffset);
- __ add(r0, r0, Operand(kHeapObjectTag));
- __ mov(pc, lr);
- } else {
- // If we did not inline the operation, then the arguments are in:
- // r0: Left value (least significant part of mantissa).
- // r1: Left value (sign, exponent, top of mantissa).
- // r2: Right value (least significant part of mantissa).
- // r3: Right value (sign, exponent, top of mantissa).
- // r5: Address of heap number for result.
+ // If we did not inline the operation, then the arguments are in:
+ // r0: Left value (least significant part of mantissa).
+ // r1: Left value (sign, exponent, top of mantissa).
+ // r2: Right value (least significant part of mantissa).
+ // r3: Right value (sign, exponent, top of mantissa).
+ // r5: Address of heap number for result.
- __ push(lr); // For later.
- __ PrepareCallCFunction(4, r4); // Two doubles count as 4 arguments.
- // Call C routine that may not cause GC or other trouble. r5 is callee
- // save.
- __ CallCFunction(ExternalReference::double_fp_operation(op_), 4);
- // Store answer in the overwritable heap number.
- #if !defined(USE_ARM_EABI)
- // Double returned in fp coprocessor register 0 and 1, encoded as register
- // cr8. Offsets must be divisible by 4 for coprocessor so we need to
- // substract the tag from r5.
- __ sub(r4, r5, Operand(kHeapObjectTag));
- __ stc(p1, cr8, MemOperand(r4, HeapNumber::kValueOffset));
- #else
- // Double returned in registers 0 and 1.
- __ Strd(r0, r1, FieldMemOperand(r5, HeapNumber::kValueOffset));
- #endif
- __ mov(r0, Operand(r5));
- // And we are done.
- __ pop(pc);
+ __ push(lr); // For later.
+ __ PrepareCallCFunction(4, r4); // Two doubles count as 4 arguments.
+ // Call C routine that may not cause GC or other trouble. r5 is
+ // callee-saved.
+ __ CallCFunction(ExternalReference::double_fp_operation(op_), 4);
+ // Store answer in the overwritable heap number.
+ #if !defined(USE_ARM_EABI)
+ // Double returned in fp coprocessor registers 0 and 1, encoded as
+ // register cr8. Coprocessor offsets must be divisible by 4, so we
+ // need to subtract the tag from r5.
+ __ sub(r4, r5, Operand(kHeapObjectTag));
+ __ stc(p1, cr8, MemOperand(r4, HeapNumber::kValueOffset));
+ #else
+ // Double returned in registers 0 and 1.
+ __ Strd(r0, r1, FieldMemOperand(r5, HeapNumber::kValueOffset));
+ #endif
+ __ mov(r0, Operand(r5));
+ // And we are done.
+ __ pop(pc);
+ }
}
}
if (lhs.is(r0)) {
- __ b(&slow);
- __ bind(&slow_reverse);
- __ Swap(r0, r1, ip);
+ if (generate_code_to_calculate_answer || slow_reverse.is_linked()) {
Kasper Lund 2010/07/06 11:56:35 Maybe just move the if (lhs.is(r0)) code into the
Erik Corry 2010/07/06 12:53:21 Done.
+ __ b(&slow);
+ __ bind(&slow_reverse);
+ __ Swap(r0, r1, ip);
+ }
}
heap_number_map = no_reg; // Don't use this any more from here on.
// We jump to here if something goes wrong (one param is not a number of any
// sort or new-space allocation fails).
- __ bind(&slow);
+ if (generate_code_to_calculate_answer || slow.is_linked()) {
Kasper Lund 2010/07/06 11:56:35 I would consider negating the condition and return
Erik Corry 2010/07/06 12:53:21 Done.
+ __ bind(&slow);
- // Push arguments to the stack
- __ Push(r1, r0);
+ // Push arguments to the stack
+ __ Push(r1, r0);
- if (Token::ADD == op_) {
- // Test for string arguments before calling runtime.
- // r1 : first argument
- // r0 : second argument
- // sp[0] : second argument
- // sp[4] : first argument
+ if (Token::ADD == op_) {
+ // Test for string arguments before calling runtime.
+ // r1 : first argument
+ // r0 : second argument
+ // sp[0] : second argument
+ // sp[4] : first argument
- Label not_strings, not_string1, string1, string1_smi2;
- __ tst(r1, Operand(kSmiTagMask));
- __ b(eq, &not_string1);
- __ CompareObjectType(r1, r2, r2, FIRST_NONSTRING_TYPE);
- __ b(ge, &not_string1);
+ Label not_strings, not_string1, string1, string1_smi2;
+ __ tst(r1, Operand(kSmiTagMask));
+ __ b(eq, &not_string1);
+ __ CompareObjectType(r1, r2, r2, FIRST_NONSTRING_TYPE);
+ __ b(ge, &not_string1);
- // First argument is a a string, test second.
- __ tst(r0, Operand(kSmiTagMask));
- __ b(eq, &string1_smi2);
- __ CompareObjectType(r0, r2, r2, FIRST_NONSTRING_TYPE);
- __ b(ge, &string1);
+ // First argument is a string, test second.
+ __ tst(r0, Operand(kSmiTagMask));
+ __ b(eq, &string1_smi2);
+ __ CompareObjectType(r0, r2, r2, FIRST_NONSTRING_TYPE);
+ __ b(ge, &string1);
- // First and second argument are strings.
- StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
- __ TailCallStub(&string_add_stub);
+ // First and second arguments are strings.
+ StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
+ __ TailCallStub(&string_add_stub);
- __ bind(&string1_smi2);
- // First argument is a string, second is a smi. Try to lookup the number
- // string for the smi in the number string cache.
- NumberToStringStub::GenerateLookupNumberStringCache(
- masm, r0, r2, r4, r5, r6, true, &string1);
+ __ bind(&string1_smi2);
+ // First argument is a string, second is a smi. Try to look up the
+ // number string for the smi in the number string cache.
+ NumberToStringStub::GenerateLookupNumberStringCache(
+ masm, r0, r2, r4, r5, r6, true, &string1);
- // Replace second argument on stack and tailcall string add stub to make
- // the result.
- __ str(r2, MemOperand(sp, 0));
- __ TailCallStub(&string_add_stub);
+ // Replace the second argument on the stack and tail-call the string
+ // add stub to compute the result.
+ __ str(r2, MemOperand(sp, 0));
+ __ TailCallStub(&string_add_stub);
- // Only first argument is a string.
- __ bind(&string1);
- __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_JS);
+ // Only first argument is a string.
+ __ bind(&string1);
+ __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_JS);
- // First argument was not a string, test second.
- __ bind(&not_string1);
- __ tst(r0, Operand(kSmiTagMask));
- __ b(eq, &not_strings);
- __ CompareObjectType(r0, r2, r2, FIRST_NONSTRING_TYPE);
- __ b(ge, &not_strings);
+ // First argument was not a string, test second.
+ __ bind(&not_string1);
+ __ tst(r0, Operand(kSmiTagMask));
+ __ b(eq, &not_strings);
+ __ CompareObjectType(r0, r2, r2, FIRST_NONSTRING_TYPE);
+ __ b(ge, &not_strings);
- // Only second argument is a string.
- __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_JS);
+ // Only second argument is a string.
+ __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_JS);
- __ bind(&not_strings);
- }
+ __ bind(&not_strings);
+ }
- __ InvokeBuiltin(builtin, JUMP_JS); // Tail call. No return.
+ __ InvokeBuiltin(builtin, JUMP_JS); // Tail call. No return.
+ }
}
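For reference, the tag arithmetic behind the loads and conversions in the hunk above: on 32-bit ARM a smi is the integer shifted left by kSmiTagSize (1) with a clear tag bit, so the ASR by kSmiTagSize recovers the integer before vcvt_f64_s32 widens it to a double; heap object pointers carry a 1 tag bit (kHeapObjectTag), which the code subtracts before vldr/vstr because VFP load/store offsets must be divisible by 4. A small sketch with constants assumed to match this era of V8:

#include <cassert>
#include <cstdint>

const int kSmiTagSize = 1;            // smi = value << 1, tag bit 0
const intptr_t kHeapObjectTag = 1;    // heap pointer = address + 1

intptr_t SmiFromInt(int32_t v) { return static_cast<intptr_t>(v) << kSmiTagSize; }
int32_t IntFromSmi(intptr_t smi) { return static_cast<int32_t>(smi >> kSmiTagSize); }

int main() {
  // Models the smi-to-double path: ASR #kSmiTagSize, then vcvt_f64_s32.
  double d7 = IntFromSmi(SmiFromInt(3));   // r0 held smi 6 (== 3 << 1)
  double d6 = IntFromSmi(SmiFromInt(4));   // r1 held smi 8 (== 4 << 1)
  assert(d6 * d7 == 12.0);                 // models vmul(d5, d6, d7)

  // Models "sub r7, r0, #kHeapObjectTag" before vldr: the untagged
  // address is word-aligned, as VFP load/store offsets require.
  intptr_t tagged_heap_number = 0x1000 + kHeapObjectTag;
  intptr_t untagged = tagged_heap_number - kHeapObjectTag;
  assert(untagged % 4 == 0);
  return 0;
}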
@@ -8752,29 +8759,15 @@
__ Push(r1, r0);
- // Internal frame is necessary to handle exceptions properly.
- __ EnterInternalFrame();
- // Call the stub proper to get the result in r0.
- __ Call(&get_result);
- __ LeaveInternalFrame();
-
- __ push(r0);
-
- __ mov(r0, Operand(Smi::FromInt(MinorKey())));
- __ push(r0);
- __ mov(r0, Operand(Smi::FromInt(op_)));
- __ push(r0);
+ __ mov(r2, Operand(Smi::FromInt(MinorKey())));
+ __ mov(r1, Operand(Smi::FromInt(op_)));
__ mov(r0, Operand(Smi::FromInt(runtime_operands_type_)));
- __ push(r0);
+ __ Push(r2, r1, r0);
__ TailCallExternalReference(
ExternalReference(IC_Utility(IC::kBinaryOp_Patch)),
- 6,
+ 5,
1);
-
- // The entry point for the result calculation is assumed to be immediately
- // after this sequence.
- __ bind(&get_result);
}
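The final hunk drops the internal frame and the re-entrant Call(&get_result): the runtime function kBinaryOp_Patch now recomputes the answer itself, so the stub passes only the two operands pushed above plus three smi-encoded descriptors, five arguments instead of six. Materializing the descriptors in r2, r1 and r0 first lets a single three-register Push replace three separate push instructions. A sketch of the assumed stack layout after Push(r2, r1, r0), modelled as one descending store-multiple:

#include <cassert>
#include <cstdint>

int main() {
  // Illustrative smi-encoded descriptors (tag bit 0, value shifted left).
  uint32_t r2 = 10u << 1;  // Smi::FromInt(MinorKey())
  uint32_t r1 = 3u << 1;   // Smi::FromInt(op_)
  uint32_t r0 = 1u << 1;   // Smi::FromInt(runtime_operands_type_)

  // Push(r2, r1, r0) as one stmdb sp!, {r0, r1, r2}: the lowest-numbered
  // register lands at the lowest address, so r0 ends up at sp[0].
  uint32_t block[8] = {0};
  uint32_t* sp = block + 8;
  sp -= 3;
  sp[0] = r0; sp[1] = r1; sp[2] = r2;

  // Reference layout: three successive single pushes of r2, r1, r0.
  uint32_t ref[8] = {0};
  uint32_t* rsp = ref + 8;
  *--rsp = r2; *--rsp = r1; *--rsp = r0;
  assert(sp[0] == rsp[0] && sp[1] == rsp[1] && sp[2] == rsp[2]);
  return 0;
}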