Index: src/x64/full-codegen-x64.cc |
diff --git a/src/x64/full-codegen-x64.cc b/src/x64/full-codegen-x64.cc |
index bac4e793b279bf4a86353db7024aa92352219e79..9ef38923a9418135a273e6708792be1666819d93 100644 |
--- a/src/x64/full-codegen-x64.cc |
+++ b/src/x64/full-codegen-x64.cc |
@@ -2286,7 +2286,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr, |
__ SmiShiftArithmeticRight(rax, rdx, rcx); |
break; |
case Token::SHL: |
- __ SmiShiftLeft(rax, rdx, rcx); |
+ __ SmiShiftLeft(rax, rdx, rcx, &stub_call); |
break; |
case Token::SHR: |
__ SmiShiftLogicalRight(rax, rdx, rcx, &stub_call); |
@@ -4454,10 +4454,22 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) { |
if (ShouldInlineSmiCase(expr->op())) { |
if (expr->op() == Token::INC) { |
__ SmiAddConstant(rax, rax, Smi::FromInt(1)); |
danno
2013/08/01 16:45:41
Handle the overflow case inside the macro assembler.
haitao.feng
2013/08/02 09:35:51
I am OK to change this but this might affect performance.
|
+ if (kSmiValueSize == 31) { |
+ // positive overflow |
+ __ testl(rax, Immediate(0x80000000)); |
+ __ j(not_zero, &stub_call, Label::kNear); |
+ } |
} else { |
__ SmiSubConstant(rax, rax, Smi::FromInt(1)); |
+ if (kSmiValueSize == 31) { |
+ // negative overflow |
+ __ testl(rax, Immediate(0x80000000)); |
+ __ j(zero, &stub_call, Label::kNear); |
+ } |
+ } |
+ if (kSmiValueSize == 32) { |
+ __ j(overflow, &stub_call, Label::kNear); |
} |
- __ j(overflow, &stub_call, Label::kNear); |
// We could eliminate this smi check if we split the code at |
// the first smi check before calling ToNumber. |
patch_site.EmitJumpIfSmi(rax, &done, Label::kNear); |