Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(487)

Unified Diff: src/x64/codegen-x64.cc

Issue 845002: Porting binary op ICs to x64. (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: '' Created 10 years, 9 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View side-by-side diff with in-line comments
Download patch
« no previous file with comments | « src/x64/codegen-x64.h ('k') | no next file » | no next file with comments »
Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
Index: src/x64/codegen-x64.cc
===================================================================
--- src/x64/codegen-x64.cc (revision 4106)
+++ src/x64/codegen-x64.cc (working copy)
@@ -8336,14 +8336,15 @@
}
OS::SNPrintF(Vector<char>(name_, len),
- "GenericBinaryOpStub_%s_%s%s_%s%s_%s%s",
+ "GenericBinaryOpStub_%s_%s%s_%s%s_%s%s_%s",
op_name,
overwrite_name,
(flags_ & NO_SMI_CODE_IN_STUB) ? "_NoSmiInStub" : "",
args_in_registers_ ? "RegArgs" : "StackArgs",
args_reversed_ ? "_R" : "",
use_sse3_ ? "SSE3" : "SSE2",
- operands_type_.ToString());
+ static_operands_type_.ToString(),
+ BinaryOpIC::GetName(runtime_operands_type_));
return name_;
}
@@ -8654,20 +8655,35 @@
void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
Label call_runtime;
- if (HasSmiCodeInStub()) {
+
+ if (ShouldGenerateSmiCode()) {
GenerateSmiCode(masm, &call_runtime);
} else if (op_ != Token::MOD) {
- GenerateLoadArguments(masm);
+ if (!HasArgsInRegisters()) {
+ GenerateLoadArguments(masm);
+ }
}
// Floating point case.
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV: {
- // rax: y
- // rdx: x
- if (operands_type_.IsNumber()) {
+ if (ShouldGenerateFPCode()) {
+ switch (op_) {
+ case Token::ADD:
+ case Token::SUB:
+ case Token::MUL:
+ case Token::DIV: {
+ if (runtime_operands_type_ == BinaryOpIC::DEFAULT &&
+ HasSmiCodeInStub()) {
+ // Execution reaches this point when the first non-smi argument occurs
+ // (and only if smi code is generated). This is the right moment to
+ // patch to HEAP_NUMBERS state. The transition is attempted only for
+ // the four basic operations. The stub stays in the DEFAULT state
+ // forever for all other operations (also if smi code is skipped).
+ GenerateTypeTransition(masm);
+ }
+
+ Label not_floats;
+ // rax: y
+ // rdx: x
+ if (static_operands_type_.IsNumber()) {
if (FLAG_debug_code) {
// Assert at runtime that inputs are only numbers.
__ AbortIfNotNumber(rdx, "GenericBinaryOpStub operand not a number.");
@@ -8676,118 +8692,132 @@
} else {
FloatingPointHelper::CheckNumberOperands(masm, &call_runtime);
}
- // Fast-case: Both operands are numbers.
- // xmm4 and xmm5 are volatile XMM registers.
- FloatingPointHelper::LoadFloatOperands(masm, xmm4, xmm5);
+ // Fast-case: Both operands are numbers.
+ // xmm4 and xmm5 are volatile XMM registers.
+ FloatingPointHelper::LoadFloatOperands(masm, xmm4, xmm5);
- switch (op_) {
- case Token::ADD: __ addsd(xmm4, xmm5); break;
- case Token::SUB: __ subsd(xmm4, xmm5); break;
- case Token::MUL: __ mulsd(xmm4, xmm5); break;
- case Token::DIV: __ divsd(xmm4, xmm5); break;
- default: UNREACHABLE();
- }
- // Allocate a heap number, if needed.
- Label skip_allocation;
- OverwriteMode mode = mode_;
- if (HasArgsReversed()) {
- if (mode == OVERWRITE_RIGHT) {
- mode = OVERWRITE_LEFT;
- } else if (mode == OVERWRITE_LEFT) {
- mode = OVERWRITE_RIGHT;
+ switch (op_) {
+ case Token::ADD: __ addsd(xmm4, xmm5); break;
+ case Token::SUB: __ subsd(xmm4, xmm5); break;
+ case Token::MUL: __ mulsd(xmm4, xmm5); break;
+ case Token::DIV: __ divsd(xmm4, xmm5); break;
+ default: UNREACHABLE();
}
- }
- switch (mode) {
- case OVERWRITE_LEFT:
- __ JumpIfNotSmi(rdx, &skip_allocation);
- __ AllocateHeapNumber(rbx, rcx, &call_runtime);
- __ movq(rdx, rbx);
- __ bind(&skip_allocation);
- __ movq(rax, rdx);
- break;
- case OVERWRITE_RIGHT:
- // If the argument in rax is already an object, we skip the
- // allocation of a heap number.
- __ JumpIfNotSmi(rax, &skip_allocation);
- // Fall through!
- case NO_OVERWRITE:
- // Allocate a heap number for the result. Keep rax and rdx intact
- // for the possible runtime call.
- __ AllocateHeapNumber(rbx, rcx, &call_runtime);
- __ movq(rax, rbx);
- __ bind(&skip_allocation);
- break;
- default: UNREACHABLE();
- }
- __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm4);
- GenerateReturn(masm);
- }
- case Token::MOD: {
- // For MOD we go directly to runtime in the non-smi case.
- break;
- }
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR: {
- Label skip_allocation, non_smi_result;
- FloatingPointHelper::LoadAsIntegers(masm, use_sse3_, &call_runtime);
- switch (op_) {
- case Token::BIT_OR: __ orl(rax, rcx); break;
- case Token::BIT_AND: __ andl(rax, rcx); break;
- case Token::BIT_XOR: __ xorl(rax, rcx); break;
- case Token::SAR: __ sarl_cl(rax); break;
- case Token::SHL: __ shll_cl(rax); break;
- case Token::SHR: __ shrl_cl(rax); break;
- default: UNREACHABLE();
- }
- if (op_ == Token::SHR) {
- // Check if result is non-negative. This can only happen for a shift
- // by zero, which also doesn't update the sign flag.
- __ testl(rax, rax);
- __ j(negative, &non_smi_result);
- }
- __ JumpIfNotValidSmiValue(rax, &non_smi_result);
- // Tag smi result, if possible, and return.
- __ Integer32ToSmi(rax, rax);
- GenerateReturn(masm);
-
- // All ops except SHR return a signed int32 that we load in a HeapNumber.
- if (op_ != Token::SHR && non_smi_result.is_linked()) {
- __ bind(&non_smi_result);
- // Allocate a heap number if needed.
- __ movsxlq(rbx, rax); // rbx: sign extended 32-bit result
- switch (mode_) {
+ // Allocate a heap number, if needed.
+ Label skip_allocation;
+ OverwriteMode mode = mode_;
+ if (HasArgsReversed()) {
+ if (mode == OVERWRITE_RIGHT) {
+ mode = OVERWRITE_LEFT;
+ } else if (mode == OVERWRITE_LEFT) {
+ mode = OVERWRITE_RIGHT;
+ }
+ }
+ switch (mode) {
case OVERWRITE_LEFT:
+ __ JumpIfNotSmi(rdx, &skip_allocation);
+ __ AllocateHeapNumber(rbx, rcx, &call_runtime);
+ __ movq(rdx, rbx);
+ __ bind(&skip_allocation);
+ __ movq(rax, rdx);
+ break;
case OVERWRITE_RIGHT:
- // If the operand was an object, we skip the
+ // If the argument in rax is already an object, we skip the
// allocation of a heap number.
- __ movq(rax, Operand(rsp, mode_ == OVERWRITE_RIGHT ?
- 1 * kPointerSize : 2 * kPointerSize));
__ JumpIfNotSmi(rax, &skip_allocation);
// Fall through!
case NO_OVERWRITE:
- __ AllocateHeapNumber(rax, rcx, &call_runtime);
+ // Allocate a heap number for the result. Keep rax and rdx intact
+ // for the possible runtime call.
+ __ AllocateHeapNumber(rbx, rcx, &call_runtime);
+ __ movq(rax, rbx);
__ bind(&skip_allocation);
break;
default: UNREACHABLE();
}
- // Store the result in the HeapNumber and return.
- __ movq(Operand(rsp, 1 * kPointerSize), rbx);
- __ fild_s(Operand(rsp, 1 * kPointerSize));
- __ fstp_d(FieldOperand(rax, HeapNumber::kValueOffset));
+ __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm4);
GenerateReturn(masm);
+ __ bind(&not_floats);
+ if (runtime_operands_type_ == BinaryOpIC::DEFAULT &&
+ !HasSmiCodeInStub()) {
+ // Execution reaches this point when the first non-number argument
+ // occurs (and only if smi code is skipped from the stub, otherwise
+ // the patching has already been done earlier in this case branch).
+ // A perfect moment to try patching to STRINGS for ADD operation.
+ if (op_ == Token::ADD) {
Mads Ager (chromium) 2010/03/17 10:10:59 Indentation is off here.
Vladislav Kaznacheev 2010/03/18 11:14:23 Done.
+ GenerateTypeTransition(masm);
+ }
+ }
+ break;
}
+ case Token::MOD: {
+ // For MOD we go directly to runtime in the non-smi case.
+ break;
+ }
+ case Token::BIT_OR:
+ case Token::BIT_AND:
+ case Token::BIT_XOR:
+ case Token::SAR:
+ case Token::SHL:
+ case Token::SHR: {
+ Label skip_allocation, non_smi_result;
+ FloatingPointHelper::LoadAsIntegers(masm, use_sse3_, &call_runtime);
+ switch (op_) {
+ case Token::BIT_OR: __ orl(rax, rcx); break;
+ case Token::BIT_AND: __ andl(rax, rcx); break;
+ case Token::BIT_XOR: __ xorl(rax, rcx); break;
+ case Token::SAR: __ sarl_cl(rax); break;
+ case Token::SHL: __ shll_cl(rax); break;
+ case Token::SHR: __ shrl_cl(rax); break;
+ default: UNREACHABLE();
+ }
+ if (op_ == Token::SHR) {
+ // Check if result is non-negative. This can only happen for a shift
+ // by zero, which also doesn't update the sign flag.
+ __ testl(rax, rax);
+ __ j(negative, &non_smi_result);
+ }
+ __ JumpIfNotValidSmiValue(rax, &non_smi_result);
+ // Tag smi result, if possible, and return.
+ __ Integer32ToSmi(rax, rax);
+ GenerateReturn(masm);
- // SHR should return uint32 - go to runtime for non-smi/negative result.
- if (op_ == Token::SHR) {
- __ bind(&non_smi_result);
+ // All ops except SHR return a signed int32 that we load in
+ // a HeapNumber.
+ if (op_ != Token::SHR && non_smi_result.is_linked()) {
+ __ bind(&non_smi_result);
+ // Allocate a heap number if needed.
+ __ movsxlq(rbx, rax); // rbx: sign extended 32-bit result
+ switch (mode_) {
+ case OVERWRITE_LEFT:
+ case OVERWRITE_RIGHT:
+ // If the operand was an object, we skip the
+ // allocation of a heap number.
+ __ movq(rax, Operand(rsp, mode_ == OVERWRITE_RIGHT ?
+ 1 * kPointerSize : 2 * kPointerSize));
+ __ JumpIfNotSmi(rax, &skip_allocation);
+ // Fall through!
+ case NO_OVERWRITE:
+ __ AllocateHeapNumber(rax, rcx, &call_runtime);
+ __ bind(&skip_allocation);
+ break;
+ default: UNREACHABLE();
+ }
+ // Store the result in the HeapNumber and return.
+ __ movq(Operand(rsp, 1 * kPointerSize), rbx);
+ __ fild_s(Operand(rsp, 1 * kPointerSize));
+ __ fstp_d(FieldOperand(rax, HeapNumber::kValueOffset));
+ GenerateReturn(masm);
+ }
+
+ // SHR should return uint32 - go to runtime for non-smi/negative result.
+ if (op_ == Token::SHR) {
+ __ bind(&non_smi_result);
+ }
+ break;
}
- break;
+ default: UNREACHABLE(); break;
}
- default: UNREACHABLE(); break;
}
// If all else fails, use the runtime system to get the correct
@@ -8795,22 +8825,21 @@
// stack in the correct order below the return address.
__ bind(&call_runtime);
if (HasArgsInRegisters()) {
- __ pop(rcx);
- if (HasArgsReversed()) {
- __ push(rax);
- __ push(rdx);
- } else {
- __ push(rdx);
- __ push(rax);
- }
- __ push(rcx);
+ GenerateRegisterArgsPush(masm);
}
+
switch (op_) {
case Token::ADD: {
// Test for string arguments before calling runtime.
Label not_strings, both_strings, not_string1, string1;
+
+ // If this stub has already generated FP-specific code then the arguments
+ // are already in rdx, rax
+ if (!ShouldGenerateFPCode() && !HasArgsInRegisters()) {
+ GenerateLoadArguments(masm);
+ }
+
Condition is_smi;
- Result answer;
is_smi = masm->CheckSmi(rdx);
__ j(is_smi, &not_string1);
__ CmpObjectType(rdx, FIRST_NONSTRING_TYPE, rdx);
@@ -8886,15 +8915,20 @@
default:
UNREACHABLE();
}
+
+ // Generate an unreachable reference to the DEFAULT stub so that it can be
Mads Ager (chromium) 2010/03/17 10:10:59 This still feels like a nasty hack to me. Does th
Vladislav Kaznacheev 2010/03/18 11:14:23 Added a TODO to experiment and remove. On 2010/03/
+ // found at the end of this stub when clearing ICs at GC.
+ if (runtime_operands_type_ != BinaryOpIC::DEFAULT) {
+ GenericBinaryOpStub uninit(MinorKey(), BinaryOpIC::DEFAULT);
+ __ TailCallStub(&uninit);
+ }
}
void GenericBinaryOpStub::GenerateLoadArguments(MacroAssembler* masm) {
- // If arguments are not passed in registers read them from the stack.
- if (!HasArgsInRegisters()) {
- __ movq(rax, Operand(rsp, 1 * kPointerSize));
- __ movq(rdx, Operand(rsp, 2 * kPointerSize));
- }
+ ASSERT(!HasArgsInRegisters());
+ __ movq(rax, Operand(rsp, 1 * kPointerSize));
+ __ movq(rdx, Operand(rsp, 2 * kPointerSize));
}
@@ -8909,8 +8943,82 @@
}
+void GenericBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
+ ASSERT(HasArgsInRegisters());
+ __ pop(rcx);
+ if (HasArgsReversed()) {
+ __ push(rax);
+ __ push(rdx);
+ } else {
+ __ push(rdx);
+ __ push(rax);
+ }
+ __ push(rcx);
+}
+
+
+void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
+ Label get_result;
+
+ // Keep a copy of operands on the stack and make sure they are also in
+ // edx, eax.
Mads Ager (chromium) 2010/03/17 10:10:59 ia32 register names.
Vladislav Kaznacheev 2010/03/18 11:14:23 Done.
+ if (HasArgsInRegisters()) {
+ GenerateRegisterArgsPush(masm);
+ } else {
+ GenerateLoadArguments(masm);
+ }
+
+ // Internal frame is necessary to handle exceptions properly.
+ __ EnterInternalFrame();
+
+ // Push arguments on stack if the stub expects them there.
+ if (!HasArgsInRegisters()) {
+ __ push(rdx);
+ __ push(rax);
+ }
+ // Call the stub proper to get the result in rax.
+ __ call(&get_result);
+ __ LeaveInternalFrame();
+
+ // Left and right arguments are already on stack.
+ __ pop(rcx);
+ // Push the operation result. The tail call to BinaryOp_Patch will
+  // return it to the original caller.
+ __ push(rax);
+
+ // Push this stub's key.
+ __ movq(rax, Immediate(MinorKey()));
+ __ Integer32ToSmi(rax, rax);
+ __ push(rax);
+
+ // Although the operation and the type info are encoded into the key,
+ // the encoding is opaque, so push them too.
+ __ movq(rax, Immediate(op_));
+ __ Integer32ToSmi(rax, rax);
+ __ push(rax);
+
+ __ movq(rax, Immediate(runtime_operands_type_));
+ __ Integer32ToSmi(rax, rax);
+ __ push(rax);
+
+ __ push(rcx);
+
+ // Perform patching to an appropriate fast case and return the result.
+ __ TailCallExternalReference(
+ ExternalReference(IC_Utility(IC::kBinaryOp_Patch)),
+ 6,
+ 1);
+
+ // The entry point for the result calculation is assumed to be immediately
+ // after this sequence.
+ __ bind(&get_result);
+}
+
+
Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) {
- return Handle<Code>::null();
+ GenericBinaryOpStub stub(key, type_info);
+ HandleScope scope;
Mads Ager (chromium) 2010/03/17 10:10:59 This returns a handle from a destroyed handle scop
Vladislav Kaznacheev 2010/03/18 11:14:23 Done.
+ return stub.GetCode();
}
« no previous file with comments | « src/x64/codegen-x64.h ('k') | no next file » | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698