Index: src/codegen-ia32.cc |
=================================================================== |
--- src/codegen-ia32.cc (revision 535) |
+++ src/codegen-ia32.cc (working copy) |
@@ -686,34 +686,50 @@ |
}; |
+// Flag that indicates whether or not the code for dealing with smis |
+// is inlined or should be dealt with in the stub. |
+enum GenericBinaryFlags { |
+ SMI_CODE_IN_STUB, |
+ SMI_CODE_INLINED |
+}; |
+ |
+ |
class GenericBinaryOpStub: public CodeStub { |
public: |
- GenericBinaryOpStub(Token::Value op, OverwriteMode mode) |
- : op_(op), mode_(mode) { } |
+ GenericBinaryOpStub(Token::Value op, |
+ OverwriteMode mode, |
+ GenericBinaryFlags flags) |
+ : op_(op), mode_(mode), flags_(flags) { } |
+ void GenerateSmiCode(MacroAssembler* masm, Label* slow); |
+ |
private: |
Token::Value op_; |
OverwriteMode mode_; |
+ GenericBinaryFlags flags_; |
const char* GetName(); |
#ifdef DEBUG |
void Print() { |
- PrintF("GenericBinaryOpStub (op %s), (mode %d)\n", |
+ PrintF("GenericBinaryOpStub (op %s), (mode %d, flags %d)\n", |
Token::String(op_), |
- static_cast<int>(mode_)); |
+ static_cast<int>(mode_), |
+ static_cast<int>(flags_)); |
} |
#endif |
- // Minor key encoding in 16 bits OOOOOOOOOOOOOOMM. |
+ // Minor key encoding in 16 bits FOOOOOOOOOOOOOMM. |
class ModeBits: public BitField<OverwriteMode, 0, 2> {}; |
- class OpBits: public BitField<Token::Value, 2, 14> {}; |
+ class OpBits: public BitField<Token::Value, 2, 13> {}; |
+ class FlagBits: public BitField<GenericBinaryFlags, 15, 1> {}; |
Major MajorKey() { return GenericBinaryOp; } |
int MinorKey() { |
// Encode the parameters in a unique 16 bit value. |
return OpBits::encode(op_) | |
- ModeBits::encode(mode_); |
+ ModeBits::encode(mode_) | |
+ FlagBits::encode(flags_); |
} |
void Generate(MacroAssembler* masm); |
}; |
@@ -736,116 +752,85 @@ |
} |
+class DeferredInlineBinaryOperation: public DeferredCode { |
+ public: |
+ DeferredInlineBinaryOperation(CodeGenerator* generator, |
+ Token::Value op, |
+ OverwriteMode mode, |
+ GenericBinaryFlags flags) |
+ : DeferredCode(generator), stub_(op, mode, flags) { } |
+ |
+ void GenerateInlineCode() { |
+ stub_.GenerateSmiCode(masm(), enter()); |
+ } |
+ |
+ virtual void Generate() { |
+ __ push(ebx); |
+ __ CallStub(&stub_); |
+ // We must preserve the eax value here, because it will be written |
+ // to the top-of-stack element when getting back to the fast case |
+ // code. See comment in GenericBinaryOperation where |
+ // deferred->exit() is bound. |
+ __ push(eax); |
iposva
2008/10/21 16:59:22
[review] This push here relies on the fact that the code generated after
the deferred code returns writes eax back to the top-of-stack element
(see the mov(frame_->Top(), eax) at the deferred exit) — worth a comment
or a rebalance so the deferred path and the inline path agree on frame depth.
+ } |
+ |
+ private: |
+ GenericBinaryOpStub stub_; |
+}; |
+ |
+ |
void CodeGenerator::GenericBinaryOperation(Token::Value op, |
- OverwriteMode overwrite_mode) { |
+ OverwriteMode overwrite_mode) { |
Comment cmnt(masm_, "[ BinaryOperation"); |
Comment cmnt_token(masm_, Token::String(op)); |
+ |
+ if (op == Token::COMMA) { |
+ // Simply discard left value. |
+ frame_->Pop(eax); |
+ frame_->Pop(); |
+ frame_->Push(eax); |
+ return; |
+ } |
+ |
+ // For now, we keep the old behavior and only inline the smi code |
+ // for the bitwise operations. |
+ GenericBinaryFlags flags; |
switch (op) { |
- case Token::ADD: |
- case Token::SUB: |
- case Token::MUL: |
- case Token::DIV: |
- case Token::MOD: { |
- GenericBinaryOpStub stub(op, overwrite_mode); |
- __ CallStub(&stub); |
- frame_->Push(eax); |
- break; |
- } |
case Token::BIT_OR: |
case Token::BIT_AND: |
- case Token::BIT_XOR: { |
- Label slow, exit; |
- frame_->Pop(eax); // get y |
- frame_->Pop(edx); // get x |
- __ mov(ecx, Operand(edx)); // Prepare smi check. |
- // tag check |
- __ or_(ecx, Operand(eax)); // ecx = x | y; |
- ASSERT(kSmiTag == 0); // adjust code below |
- __ test(ecx, Immediate(kSmiTagMask)); |
- __ j(not_zero, &slow, taken); |
- switch (op) { |
- case Token::BIT_OR: __ or_(eax, Operand(edx)); break; |
- case Token::BIT_AND: __ and_(eax, Operand(edx)); break; |
- case Token::BIT_XOR: __ xor_(eax, Operand(edx)); break; |
- default: UNREACHABLE(); |
- } |
- __ jmp(&exit); |
- __ bind(&slow); |
- frame_->Push(edx); // restore stack slots |
- frame_->Push(eax); |
- GenericBinaryOpStub stub(op, overwrite_mode); |
- __ CallStub(&stub); |
- __ bind(&exit); |
- frame_->Push(eax); // push the result to the stack |
- break; |
- } |
+ case Token::BIT_XOR: |
case Token::SHL: |
case Token::SHR: |
- case Token::SAR: { |
- Label slow, exit; |
- frame_->Pop(edx); // get y |
- frame_->Pop(eax); // get x |
- // tag check |
- __ mov(ecx, Operand(edx)); |
- __ or_(ecx, Operand(eax)); // ecx = x | y; |
- ASSERT(kSmiTag == 0); // adjust code below |
- __ test(ecx, Immediate(kSmiTagMask)); |
- __ j(not_zero, &slow, not_taken); |
- // get copies of operands |
- __ mov(ebx, Operand(eax)); |
- __ mov(ecx, Operand(edx)); |
- // remove tags from operands (but keep sign) |
- __ sar(ebx, kSmiTagSize); |
- __ sar(ecx, kSmiTagSize); |
- // perform operation |
- switch (op) { |
- case Token::SAR: |
- __ sar(ebx); |
- // no checks of result necessary |
- break; |
- case Token::SHR: |
- __ shr(ebx); |
- // Check that the *unsigned* result fits in a smi. |
- // neither of the two high-order bits can be set: |
- // - 0x80000000: high bit would be lost when smi tagging. |
- // - 0x40000000: this number would convert to negative when |
- // smi tagging these two cases can only happen with shifts |
- // by 0 or 1 when handed a valid smi. |
- __ test(ebx, Immediate(0xc0000000)); |
- __ j(not_zero, &slow, not_taken); |
- break; |
- case Token::SHL: |
- __ shl(ebx); |
- // Check that the *signed* result fits in a smi. |
- __ lea(ecx, Operand(ebx, 0x40000000)); |
- __ test(ecx, Immediate(0x80000000)); |
- __ j(not_zero, &slow, not_taken); |
- break; |
- default: UNREACHABLE(); |
- } |
- // tag result and store it in TOS (eax) |
- ASSERT(kSmiTagSize == times_2); // adjust code if not the case |
- __ lea(eax, Operand(ebx, times_2, kSmiTag)); |
- __ jmp(&exit); |
- // slow case |
- __ bind(&slow); |
- frame_->Push(eax); // restore stack |
- frame_->Push(edx); |
- GenericBinaryOpStub stub(op, overwrite_mode); |
- __ CallStub(&stub); |
- __ bind(&exit); |
- frame_->Push(eax); |
+ case Token::SAR: |
+ flags = SMI_CODE_INLINED; |
break; |
- } |
- case Token::COMMA: { |
- // simply discard left value |
- frame_->Pop(eax); |
- frame_->Pop(); |
- frame_->Push(eax); |
+ |
+ default: |
+ flags = SMI_CODE_IN_STUB; |
break; |
- } |
- default: UNREACHABLE(); |
} |
+ |
+ if (flags == SMI_CODE_INLINED) { |
+ // Create a new deferred code for the slow-case part. |
+ DeferredInlineBinaryOperation* deferred = |
+ new DeferredInlineBinaryOperation(this, op, overwrite_mode, flags); |
+ // Fetch the operands from the stack. |
+ frame_->Pop(ebx); // get y |
+ __ mov(eax, frame_->Top()); // get x |
iposva
2008/10/21 16:59:22
[review] This should be a Pop(reg) to balance it with the rest of the
frame operations, instead of reading Top() and leaving the operand on
the stack.
+ // Generate the inline part of the code. |
+ deferred->GenerateInlineCode(); |
+ // Put result back on the stack. It seems somewhat weird to let |
+ // the deferred code jump back before the assignment to the frame |
+ // top, but this is just to let the peephole optimizer get rid of |
+ // more code. |
+ __ bind(deferred->exit()); |
+ __ mov(frame_->Top(), eax); |
iposva
2008/10/21 16:59:22
[review] We should frame_->Push(eax) here, since the deferred slow case
re-pushes its operands; pairing Pop/Push keeps the virtual frame
consistent between the inline and deferred paths.
+ } else { |
+ // Call the stub and push the result to the stack. |
+ GenericBinaryOpStub stub(op, overwrite_mode, flags); |
+ __ CallStub(&stub); |
+ frame_->Push(eax); |
+ } |
} |
@@ -861,7 +846,7 @@ |
virtual void Generate() { |
__ push(eax); |
__ push(Immediate(Smi::FromInt(value_))); |
- GenericBinaryOpStub igostub(op_, overwrite_mode_); |
+ GenericBinaryOpStub igostub(op_, overwrite_mode_, SMI_CODE_INLINED); |
__ CallStub(&igostub); |
} |
@@ -884,7 +869,7 @@ |
virtual void Generate() { |
__ push(Immediate(Smi::FromInt(value_))); |
__ push(eax); |
- GenericBinaryOpStub igostub(op_, overwrite_mode_); |
+ GenericBinaryOpStub igostub(op_, overwrite_mode_, SMI_CODE_INLINED); |
__ CallStub(&igostub); |
} |
@@ -909,7 +894,7 @@ |
__ sub(Operand(eax), immediate); |
__ push(eax); |
__ push(immediate); |
- GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_); |
+ GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, SMI_CODE_INLINED); |
__ CallStub(&igostub); |
} |
@@ -933,7 +918,7 @@ |
__ sub(Operand(eax), immediate); |
__ push(immediate); |
__ push(eax); |
- GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_); |
+ GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, SMI_CODE_INLINED); |
__ CallStub(&igostub); |
} |
@@ -957,7 +942,7 @@ |
__ add(Operand(eax), immediate); |
__ push(eax); |
__ push(immediate); |
- GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_); |
+ GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, SMI_CODE_INLINED); |
__ CallStub(&igostub); |
} |
@@ -983,7 +968,7 @@ |
__ add(eax, Operand(tos_reg_)); |
__ push(eax); |
__ push(tos_reg_); |
- GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_); |
+ GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, SMI_CODE_INLINED); |
__ CallStub(&igostub); |
} |
@@ -3959,182 +3944,172 @@ |
} |
-void GenericBinaryOpStub::Generate(MacroAssembler* masm) { |
- Label call_runtime; |
- __ mov(eax, Operand(esp, 1 * kPointerSize)); // Get y. |
- __ mov(edx, Operand(esp, 2 * kPointerSize)); // Get x. |
+void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) { |
+ // Perform fast-case smi code for the operation (eax <op> ebx) and |
+ // leave result in register eax. |
- // 1. Smi case. |
+ // Prepare the smi check of both operands by or'ing them together |
+ // before checking against the smi mask. |
+ __ mov(ecx, Operand(ebx)); |
+ __ or_(ecx, Operand(eax)); |
+ |
switch (op_) { |
- case Token::ADD: { |
- // eax: y. |
- // edx: x. |
- Label revert; |
- __ mov(ecx, Operand(eax)); |
- __ or_(ecx, Operand(edx)); // ecx = x | y. |
- __ add(eax, Operand(edx)); // Add y optimistically. |
- // Go slow-path in case of overflow. |
- __ j(overflow, &revert, not_taken); |
- // Go slow-path in case of non-smi operands. |
- ASSERT(kSmiTag == 0); // adjust code below |
- __ test(ecx, Immediate(kSmiTagMask)); |
- __ j(not_zero, &revert, not_taken); |
- __ ret(2 * kPointerSize); // Remove all operands. |
+ case Token::ADD: |
+ __ add(eax, Operand(ebx)); // add optimistically |
+ __ j(overflow, slow, not_taken); |
+ break; |
- // Revert optimistic add. |
- __ bind(&revert); |
- __ sub(eax, Operand(edx)); |
+ case Token::SUB: |
+ __ sub(eax, Operand(ebx)); // subtract optimistically |
+ __ j(overflow, slow, not_taken); |
break; |
- } |
- case Token::SUB: { |
- // eax: y. |
- // edx: x. |
- Label revert; |
- __ mov(ecx, Operand(edx)); |
- __ or_(ecx, Operand(eax)); // ecx = x | y. |
- __ sub(edx, Operand(eax)); // Subtract y optimistically. |
- // Go slow-path in case of overflow. |
- __ j(overflow, &revert, not_taken); |
- // Go slow-path in case of non-smi operands. |
- ASSERT(kSmiTag == 0); // adjust code below |
- __ test(ecx, Immediate(kSmiTagMask)); |
- __ j(not_zero, &revert, not_taken); |
- __ mov(eax, Operand(edx)); |
- __ ret(2 * kPointerSize); // Remove all operands. |
- // Revert optimistic sub. |
- __ bind(&revert); |
- __ add(edx, Operand(eax)); |
+ case Token::DIV: |
+ case Token::MOD: |
+ // Sign extend eax into edx:eax. |
+ __ cdq(); |
+ // Check for 0 divisor. |
+ __ test(ebx, Operand(ebx)); |
+ __ j(zero, slow, not_taken); |
break; |
- } |
- case Token::MUL: { |
- // eax: y |
- // edx: x |
- // a) both operands smi and result fits into a smi -> return. |
- // b) at least one of operands non-smi -> non_smi_operands. |
- // c) result does not fit in a smi -> non_smi_result. |
- Label non_smi_operands, non_smi_result; |
- // Tag check. |
- __ mov(ecx, Operand(edx)); |
- __ or_(ecx, Operand(eax)); // ecx = x | y. |
- ASSERT(kSmiTag == 0); // Adjust code below. |
- __ test(ecx, Immediate(kSmiTagMask)); |
- // Jump if not both smi; check if float numbers. |
- __ j(not_zero, &non_smi_operands, not_taken); |
- // Get copies of operands. |
- __ mov(ebx, Operand(eax)); |
- __ mov(ecx, Operand(edx)); |
+ default: |
+ // Fall-through to smi check. |
+ break; |
+ } |
+ |
+ // Perform the actual smi check. |
+ ASSERT(kSmiTag == 0); // adjust zero check if not the case |
+ __ test(ecx, Immediate(kSmiTagMask)); |
+ __ j(not_zero, slow, not_taken); |
+ |
+ switch (op_) { |
+ case Token::ADD: |
+ case Token::SUB: |
+ // Do nothing here. |
+ break; |
+ |
+ case Token::MUL: |
// If the smi tag is 0 we can just leave the tag on one operand. |
- ASSERT(kSmiTag == 0); // adjust code below |
+ ASSERT(kSmiTag == 0); // adjust code below if not the case |
// Remove tag from one of the operands (but keep sign). |
- __ sar(ecx, kSmiTagSize); |
+ __ sar(eax, kSmiTagSize); |
// Do multiplication. |
- __ imul(eax, Operand(ecx)); // Multiplication of Smis; result in eax. |
+ __ imul(eax, Operand(ebx)); // multiplication of smis; result in eax |
// Go slow on overflows. |
- __ j(overflow, &non_smi_result, not_taken); |
- // ...but operands OK for float arithmetic. |
- |
- // If the result is +0 we may need to check if the result should |
- // really be -0. Welcome to the -0 fan club. |
- __ NegativeZeroTest(eax, ebx, edx, ecx, &non_smi_result); |
- |
- __ ret(2 * kPointerSize); |
- |
- __ bind(&non_smi_result); |
- // TODO(1243132): Do not check float operands here. |
- __ bind(&non_smi_operands); |
- __ mov(eax, Operand(esp, 1 * kPointerSize)); |
- __ mov(edx, Operand(esp, 2 * kPointerSize)); |
+ __ j(overflow, slow, not_taken); |
+ // Check for negative zero result. |
+ __ NegativeZeroTest(eax, ecx, slow); // use ecx = x | y |
break; |
- } |
- case Token::DIV: { |
- // eax: y |
- // edx: x |
- Label non_smi_operands, non_smi_result, division_by_zero; |
- __ mov(ebx, Operand(eax)); // Get y |
- __ mov(eax, Operand(edx)); // Get x |
- __ cdq(); // Sign extend eax into edx:eax. |
- // Tag check. |
- __ mov(ecx, Operand(ebx)); |
- __ or_(ecx, Operand(eax)); // ecx = x | y. |
- ASSERT(kSmiTag == 0); // Adjust code below. |
- __ test(ecx, Immediate(kSmiTagMask)); |
- // Jump if not both smi; check if float numbers. |
- __ j(not_zero, &non_smi_operands, not_taken); |
- __ test(ebx, Operand(ebx)); // Check for 0 divisor. |
- __ j(zero, &division_by_zero, not_taken); |
- |
+ case Token::DIV: |
+ // Divide edx:eax by ebx. |
__ idiv(ebx); |
- // Check for the corner case of dividing the most negative smi by -1. |
- // (We cannot use the overflow flag, since it is not set by idiv.) |
+ // Check for the corner case of dividing the most negative smi |
+ // by -1. We cannot use the overflow flag, since it is not set |
+ // by idiv instruction. |
ASSERT(kSmiTag == 0 && kSmiTagSize == 1); |
__ cmp(eax, 0x40000000); |
- __ j(equal, &non_smi_result); |
- // If the result is +0 we may need to check if the result should |
- // really be -0. Welcome to the -0 fan club. |
- __ NegativeZeroTest(eax, ecx, &non_smi_result); // Use ecx = x | y. |
+ __ j(equal, slow); |
+ // Check for negative zero result. |
+ __ NegativeZeroTest(eax, ecx, slow); // use ecx = x | y |
+ // Check that the remainder is zero. |
__ test(edx, Operand(edx)); |
- // Use floats if there's a remainder. |
- __ j(not_zero, &non_smi_result, not_taken); |
- __ shl(eax, kSmiTagSize); |
- __ ret(2 * kPointerSize); // Remove all operands. |
- |
- __ bind(&division_by_zero); |
- __ mov(eax, Operand(esp, 1 * kPointerSize)); |
- __ mov(edx, Operand(esp, 2 * kPointerSize)); |
- __ jmp(&call_runtime); // Division by zero must go through runtime. |
- |
- __ bind(&non_smi_result); |
- // TODO(1243132): Do not check float operands here. |
- __ bind(&non_smi_operands); |
- __ mov(eax, Operand(esp, 1 * kPointerSize)); |
- __ mov(edx, Operand(esp, 2 * kPointerSize)); |
+ __ j(not_zero, slow); |
+ // Tag the result and store it in register eax. |
+ ASSERT(kSmiTagSize == times_2); // adjust code if not the case |
+ __ lea(eax, Operand(eax, times_2, kSmiTag)); |
break; |
- } |
- case Token::MOD: { |
- Label slow; |
- __ mov(ebx, Operand(eax)); // get y |
- __ mov(eax, Operand(edx)); // get x |
- __ cdq(); // sign extend eax into edx:eax |
- // tag check |
- __ mov(ecx, Operand(ebx)); |
- __ or_(ecx, Operand(eax)); // ecx = x | y; |
- ASSERT(kSmiTag == 0); // adjust code below |
- __ test(ecx, Immediate(kSmiTagMask)); |
- __ j(not_zero, &slow, not_taken); |
- __ test(ebx, Operand(ebx)); // test for y == 0 |
- __ j(zero, &slow); |
- // Fast case: Do integer division and use remainder. |
+ case Token::MOD: |
+ // Divide edx:eax by ebx. |
__ idiv(ebx); |
- __ NegativeZeroTest(edx, ecx, &slow); // use ecx = x | y |
+ // Check for negative zero result. |
+ __ NegativeZeroTest(edx, ecx, slow); // use ecx = x | y |
+ // Move remainder to register eax. |
__ mov(eax, Operand(edx)); |
- __ ret(2 * kPointerSize); |
- |
- // Slow case: Call runtime operator implementation. |
- __ bind(&slow); |
- __ mov(eax, Operand(esp, 1 * kPointerSize)); |
- __ mov(edx, Operand(esp, 2 * kPointerSize)); |
- // Fall through to |call_runtime|. |
break; |
- } |
+ |
case Token::BIT_OR: |
+ __ or_(eax, Operand(ebx)); |
+ break; |
+ |
case Token::BIT_AND: |
+ __ and_(eax, Operand(ebx)); |
+ break; |
+ |
case Token::BIT_XOR: |
- case Token::SAR: |
+ __ xor_(eax, Operand(ebx)); |
+ break; |
+ |
case Token::SHL: |
- case Token::SHR: { |
- // Smi-case for bitops should already have been inlined. |
+ case Token::SHR: |
+ case Token::SAR: |
+ // Move the second operand into register ecx. |
+ __ mov(ecx, Operand(ebx)); |
+ // Remove tags from operands (but keep sign). |
+ __ sar(eax, kSmiTagSize); |
+ __ sar(ecx, kSmiTagSize); |
+ // Perform the operation. |
+ switch (op_) { |
+ case Token::SAR: |
+ __ sar(eax); |
+ // No checks of result necessary |
+ break; |
+ case Token::SHR: |
+ __ shr(eax); |
+ // Check that the *unsigned* result fits in a smi. |
+ // Neither of the two high-order bits can be set: |
+ // - 0x80000000: high bit would be lost when smi tagging. |
+ // - 0x40000000: this number would convert to negative when |
+ // Smi tagging these two cases can only happen with shifts |
+ // by 0 or 1 when handed a valid smi. |
+ __ test(eax, Immediate(0xc0000000)); |
+ __ j(not_zero, slow, not_taken); |
+ break; |
+ case Token::SHL: |
+ __ shl(eax); |
+ // Check that the *signed* result fits in a smi. |
+ __ lea(ecx, Operand(eax, 0x40000000)); |
+ __ test(ecx, Immediate(0x80000000)); |
+ __ j(not_zero, slow, not_taken); |
+ break; |
+ default: |
+ UNREACHABLE(); |
+ } |
+ // Tag the result and store it in register eax. |
+ ASSERT(kSmiTagSize == times_2); // adjust code if not the case |
+ __ lea(eax, Operand(eax, times_2, kSmiTag)); |
break; |
- } |
- default: { |
+ |
+ default: |
UNREACHABLE(); |
- } |
+ break; |
} |
+} |
- // 2. Floating point case. |
+ |
+void GenericBinaryOpStub::Generate(MacroAssembler* masm) { |
+ Label call_runtime; |
+ |
+ if (flags_ == SMI_CODE_IN_STUB) { |
+ // The fast case smi code wasn't inlined in the stub caller |
+ // code. Generate it here to speed up common operations. |
+ Label slow; |
+ __ mov(ebx, Operand(esp, 1 * kPointerSize)); // get y |
+ __ mov(eax, Operand(esp, 2 * kPointerSize)); // get x |
+ GenerateSmiCode(masm, &slow); |
+ __ ret(2 * kPointerSize); // remove both operands |
+ |
+ // Too bad. The fast case smi code didn't succeed. |
+ __ bind(&slow); |
+ } |
+ |
+ // Setup registers. |
+ __ mov(eax, Operand(esp, 1 * kPointerSize)); // get y |
+ __ mov(edx, Operand(esp, 2 * kPointerSize)); // get x |
+ |
+ // Floating point case. |
switch (op_) { |
case Token::ADD: |
case Token::SUB: |
@@ -4276,7 +4251,8 @@ |
default: UNREACHABLE(); break; |
} |
- // 3. If all else fails, use the runtime system to get the correct result. |
+ // If all else fails, use the runtime system to get the correct |
+ // result. |
__ bind(&call_runtime); |
switch (op_) { |
case Token::ADD: |