| Index: src/ia32/codegen-ia32.cc
|
| ===================================================================
|
| --- src/ia32/codegen-ia32.cc (revision 4147)
|
| +++ src/ia32/codegen-ia32.cc (working copy)
|
| @@ -112,6 +112,8 @@
|
| allocator_(NULL),
|
| state_(NULL),
|
| loop_nesting_(0),
|
| + in_safe_int32_mode_(false),
|
| + safe_int32_mode_enabled_(true),
|
| function_return_is_shadowed_(false),
|
| in_spilled_code_(false) {
|
| }
|
| @@ -437,14 +439,14 @@
|
| // frame. If the expression is boolean-valued it may be compiled (or
|
| // partially compiled) into control flow to the control destination.
|
| // If force_control is true, control flow is forced.
|
| -void CodeGenerator::LoadCondition(Expression* x,
|
| +void CodeGenerator::LoadCondition(Expression* expr,
|
| ControlDestination* dest,
|
| bool force_control) {
|
| ASSERT(!in_spilled_code());
|
| int original_height = frame_->height();
|
|
|
| { CodeGenState new_state(this, dest);
|
| - Visit(x);
|
| + Visit(expr);
|
|
|
| // If we hit a stack overflow, we may not have actually visited
|
| // the expression. In that case, we ensure that we have a
|
| @@ -481,64 +483,157 @@
|
| }
|
|
|
|
|
| +void CodeGenerator::LoadInSafeInt32Mode(Expression* expr,
|
| + BreakTarget* unsafe_bailout) {
|
| + set_unsafe_bailout(unsafe_bailout);
|
| + set_in_safe_int32_mode(true);
|
| + Load(expr);
|
| + Result value = frame_->Pop();
|
| + ASSERT(frame_->HasNoUntaggedInt32Elements());
|
| + ConvertInt32ResultToNumber(&value);
|
| + set_in_safe_int32_mode(false);
|
| + set_unsafe_bailout(NULL);
|
| + frame_->Push(&value);
|
| +}
|
| +
|
| +
|
| +void CodeGenerator::LoadWithSafeInt32ModeDisabled(Expression* expr) {
|
| + set_safe_int32_mode_enabled(false);
|
| + Load(expr);
|
| + set_safe_int32_mode_enabled(true);
|
| +}
|
| +
|
| +
|
| +void CodeGenerator::ConvertInt32ResultToNumber(Result* value) {
|
| + ASSERT(value->is_untagged_int32());
|
| + if (value->is_register()) {
|
| + Register val = value->reg();
|
| + JumpTarget done;
|
| + __ add(val, Operand(val));
|
| + done.Branch(no_overflow, value);
|
| + __ sar(val, 1);
|
| + // If there was an overflow, bits 30 and 31 of the original number disagree.
|
| + __ xor_(val, 0x80000000u);
|
| + if (CpuFeatures::IsSupported(SSE2)) {
|
| + CpuFeatures::Scope fscope(SSE2);
|
| + __ cvtsi2sd(xmm0, Operand(val));
|
| + } else {
|
| + // Move val to ST[0] in the FPU
|
| + // Push and pop are safe with respect to the virtual frame because
|
| + // all synced elements are below the actual stack pointer.
|
| + __ push(val);
|
| + __ fild_s(Operand(esp, 0));
|
| + __ pop(val);
|
| + }
|
| + Result scratch = allocator_->Allocate();
|
| + ASSERT(scratch.is_register());
|
| + Label allocation_failed;
|
| + __ AllocateHeapNumber(val, scratch.reg(),
|
| + no_reg, &allocation_failed);
|
| + VirtualFrame* clone = new VirtualFrame(frame_);
|
| + scratch.Unuse();
|
| + if (CpuFeatures::IsSupported(SSE2)) {
|
| + CpuFeatures::Scope fscope(SSE2);
|
| + __ movdbl(FieldOperand(val, HeapNumber::kValueOffset), xmm0);
|
| + } else {
|
| + __ fstp_d(FieldOperand(val, HeapNumber::kValueOffset));
|
| + }
|
| + done.Jump(value);
|
| +
|
| + // Establish the virtual frame, cloned from where AllocateHeapNumber
|
| + // jumped to allocation_failed.
|
| + RegisterFile empty_regs;
|
| + SetFrame(clone, &empty_regs);
|
| + __ bind(&allocation_failed);
|
| + unsafe_bailout_->Jump();
|
| +
|
| + done.Bind(value);
|
| + } else {
|
| + ASSERT(value->is_constant());
|
| + }
|
| + value->set_untagged_int32(false);
|
| +}
|
| +
|
| +
|
| void CodeGenerator::Load(Expression* expr) {
|
| #ifdef DEBUG
|
| int original_height = frame_->height();
|
| #endif
|
| ASSERT(!in_spilled_code());
|
| - JumpTarget true_target;
|
| - JumpTarget false_target;
|
| - ControlDestination dest(&true_target, &false_target, true);
|
| - LoadCondition(expr, &dest, false);
|
| + JumpTarget done;
|
|
|
| - if (dest.false_was_fall_through()) {
|
| - // The false target was just bound.
|
| - JumpTarget loaded;
|
| - frame_->Push(Factory::false_value());
|
| - // There may be dangling jumps to the true target.
|
| - if (true_target.is_linked()) {
|
| - loaded.Jump();
|
| - true_target.Bind();
|
| - frame_->Push(Factory::true_value());
|
| - loaded.Bind();
|
| - }
|
| + // If the expression should be a side-effect-free 32-bit int computation,
|
| + // compile that SafeInt32 path, and a bailout path.
|
| + if (!in_safe_int32_mode() &&
|
| + safe_int32_mode_enabled() &&
|
| + expr->side_effect_free() &&
|
| + expr->num_bit_ops() > 2 &&
|
| + CpuFeatures::IsSupported(SSE2)) {
|
| + BreakTarget unsafe_bailout;
|
| + unsafe_bailout.set_expected_height(frame_->height());
|
| + LoadInSafeInt32Mode(expr, &unsafe_bailout);
|
| + done.Jump();
|
|
|
| - } else if (dest.is_used()) {
|
| - // There is true, and possibly false, control flow (with true as
|
| - // the fall through).
|
| - JumpTarget loaded;
|
| - frame_->Push(Factory::true_value());
|
| - if (false_target.is_linked()) {
|
| - loaded.Jump();
|
| - false_target.Bind();
|
| - frame_->Push(Factory::false_value());
|
| - loaded.Bind();
|
| + if (unsafe_bailout.is_linked()) {
|
| + unsafe_bailout.Bind();
|
| + LoadWithSafeInt32ModeDisabled(expr);
|
| }
|
| + } else {
|
| + JumpTarget true_target;
|
| + JumpTarget false_target;
|
|
|
| - } else {
|
| - // We have a valid value on top of the frame, but we still may
|
| - // have dangling jumps to the true and false targets from nested
|
| - // subexpressions (eg, the left subexpressions of the
|
| - // short-circuited boolean operators).
|
| - ASSERT(has_valid_frame());
|
| - if (true_target.is_linked() || false_target.is_linked()) {
|
| + ControlDestination dest(&true_target, &false_target, true);
|
| + LoadCondition(expr, &dest, false);
|
| +
|
| + if (dest.false_was_fall_through()) {
|
| + // The false target was just bound.
|
| JumpTarget loaded;
|
| - loaded.Jump(); // Don't lose the current TOS.
|
| + frame_->Push(Factory::false_value());
|
| + // There may be dangling jumps to the true target.
|
| if (true_target.is_linked()) {
|
| + loaded.Jump();
|
| true_target.Bind();
|
| frame_->Push(Factory::true_value());
|
| - if (false_target.is_linked()) {
|
| - loaded.Jump();
|
| - }
|
| + loaded.Bind();
|
| }
|
| +
|
| + } else if (dest.is_used()) {
|
| + // There is true, and possibly false, control flow (with true as
|
| + // the fall through).
|
| + JumpTarget loaded;
|
| + frame_->Push(Factory::true_value());
|
| if (false_target.is_linked()) {
|
| + loaded.Jump();
|
| false_target.Bind();
|
| frame_->Push(Factory::false_value());
|
| + loaded.Bind();
|
| }
|
| - loaded.Bind();
|
| +
|
| + } else {
|
| + // We have a valid value on top of the frame, but we still may
|
| + // have dangling jumps to the true and false targets from nested
|
| + // subexpressions (eg, the left subexpressions of the
|
| + // short-circuited boolean operators).
|
| + ASSERT(has_valid_frame());
|
| + if (true_target.is_linked() || false_target.is_linked()) {
|
| + JumpTarget loaded;
|
| + loaded.Jump(); // Don't lose the current TOS.
|
| + if (true_target.is_linked()) {
|
| + true_target.Bind();
|
| + frame_->Push(Factory::true_value());
|
| + if (false_target.is_linked()) {
|
| + loaded.Jump();
|
| + }
|
| + }
|
| + if (false_target.is_linked()) {
|
| + false_target.Bind();
|
| + frame_->Push(Factory::false_value());
|
| + }
|
| + loaded.Bind();
|
| + }
|
| }
|
| }
|
| -
|
| + done.Bind();
|
| ASSERT(has_valid_frame());
|
| ASSERT(frame_->height() == original_height + 1);
|
| }
|
| @@ -4312,7 +4407,7 @@
|
|
|
| void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) {
|
| Comment cmnt(masm_, "[ FunctionLiteral");
|
| -
|
| + ASSERT(!in_safe_int32_mode());
|
| // Build the function boilerplate and instantiate it.
|
| Handle<JSFunction> boilerplate =
|
| Compiler::BuildBoilerplate(node, script(), this);
|
| @@ -4325,6 +4420,7 @@
|
|
|
| void CodeGenerator::VisitFunctionBoilerplateLiteral(
|
| FunctionBoilerplateLiteral* node) {
|
| + ASSERT(!in_safe_int32_mode());
|
| Comment cmnt(masm_, "[ FunctionBoilerplateLiteral");
|
| Result result = InstantiateBoilerplate(node->boilerplate());
|
| frame()->Push(&result);
|
| @@ -4333,6 +4429,7 @@
|
|
|
| void CodeGenerator::VisitConditional(Conditional* node) {
|
| Comment cmnt(masm_, "[ Conditional");
|
| + ASSERT(!in_safe_int32_mode());
|
| JumpTarget then;
|
| JumpTarget else_;
|
| JumpTarget exit;
|
| @@ -4503,6 +4600,7 @@
|
| Slot* slot,
|
| TypeofState typeof_state,
|
| JumpTarget* slow) {
|
| + ASSERT(!in_safe_int32_mode());
|
| // Check that no extension objects have been created by calls to
|
| // eval from the current scope to the global scope.
|
| Register context = esi;
|
| @@ -4671,10 +4769,20 @@
|
| }
|
|
|
|
|
| -void CodeGenerator::VisitSlot(Slot* node) {
|
| +void CodeGenerator::VisitSlot(Slot* slot) {
|
| Comment cmnt(masm_, "[ Slot");
|
| - Result result = LoadFromSlotCheckForArguments(node, NOT_INSIDE_TYPEOF);
|
| - frame()->Push(&result);
|
| + if (in_safe_int32_mode()) {
|
| + if ((slot->type() == Slot::LOCAL && !slot->is_arguments())) {
|
| + frame()->UntaggedPushLocalAt(slot->index());
|
| + } else if (slot->type() == Slot::PARAMETER) {
|
| + frame()->UntaggedPushParameterAt(slot->index());
|
| + } else {
|
| + UNREACHABLE();
|
| + }
|
| + } else {
|
| + Result result = LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
|
| + frame()->Push(&result);
|
| + }
|
| }
|
|
|
|
|
| @@ -4686,6 +4794,7 @@
|
| Visit(expr);
|
| } else {
|
| ASSERT(var->is_global());
|
| + ASSERT(!in_safe_int32_mode());
|
| Reference ref(this, node);
|
| ref.GetValue();
|
| }
|
| @@ -4694,7 +4803,11 @@
|
|
|
| void CodeGenerator::VisitLiteral(Literal* node) {
|
| Comment cmnt(masm_, "[ Literal");
|
| - frame_->Push(node->handle());
|
| + if (in_safe_int32_mode()) {
|
| + frame_->PushUntaggedElement(node->handle());
|
| + } else {
|
| + frame_->Push(node->handle());
|
| + }
|
| }
|
|
|
|
|
| @@ -4768,6 +4881,7 @@
|
|
|
|
|
| void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) {
|
| + ASSERT(!in_safe_int32_mode());
|
| Comment cmnt(masm_, "[ RegExp Literal");
|
|
|
| // Retrieve the literals array and check the allocated entry. Begin
|
| @@ -4804,6 +4918,7 @@
|
|
|
|
|
| void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
|
| + ASSERT(!in_safe_int32_mode());
|
| Comment cmnt(masm_, "[ ObjectLiteral");
|
|
|
| // Load a writable copy of the function of this activation in a
|
| @@ -4888,6 +5003,7 @@
|
|
|
|
|
| void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
|
| + ASSERT(!in_safe_int32_mode());
|
| Comment cmnt(masm_, "[ ArrayLiteral");
|
|
|
| // Load a writable copy of the function of this activation in a
|
| @@ -4959,6 +5075,7 @@
|
|
|
|
|
| void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* node) {
|
| + ASSERT(!in_safe_int32_mode());
|
| ASSERT(!in_spilled_code());
|
| // Call runtime routine to allocate the catch extension object and
|
| // assign the exception value to the catch variable.
|
| @@ -5178,6 +5295,7 @@
|
|
|
|
|
| void CodeGenerator::VisitAssignment(Assignment* node) {
|
| + ASSERT(!in_safe_int32_mode());
|
| #ifdef DEBUG
|
| int original_height = frame()->height();
|
| #endif
|
| @@ -5213,6 +5331,7 @@
|
|
|
|
|
| void CodeGenerator::VisitThrow(Throw* node) {
|
| + ASSERT(!in_safe_int32_mode());
|
| Comment cmnt(masm_, "[ Throw");
|
| Load(node->exception());
|
| Result result = frame_->CallRuntime(Runtime::kThrow, 1);
|
| @@ -5221,6 +5340,7 @@
|
|
|
|
|
| void CodeGenerator::VisitProperty(Property* node) {
|
| + ASSERT(!in_safe_int32_mode());
|
| Comment cmnt(masm_, "[ Property");
|
| Reference property(this, node);
|
| property.GetValue();
|
| @@ -5228,6 +5348,7 @@
|
|
|
|
|
| void CodeGenerator::VisitCall(Call* node) {
|
| + ASSERT(!in_safe_int32_mode());
|
| Comment cmnt(masm_, "[ Call");
|
|
|
| Expression* function = node->expression();
|
| @@ -5443,6 +5564,7 @@
|
|
|
|
|
| void CodeGenerator::VisitCallNew(CallNew* node) {
|
| + ASSERT(!in_safe_int32_mode());
|
| Comment cmnt(masm_, "[ CallNew");
|
|
|
| // According to ECMA-262, section 11.2.2, page 44, the function
|
| @@ -6370,6 +6492,7 @@
|
|
|
|
|
| void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
|
| + ASSERT(!in_safe_int32_mode());
|
| if (CheckForInlineRuntimeCall(node)) {
|
| return;
|
| }
|
| @@ -6496,64 +6619,100 @@
|
| }
|
|
|
| } else {
|
| - Load(node->expression());
|
| - bool overwrite =
|
| - (node->expression()->AsBinaryOperation() != NULL &&
|
| - node->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
|
| - switch (op) {
|
| - case Token::SUB: {
|
| - GenericUnaryOpStub stub(Token::SUB, overwrite);
|
| - Result operand = frame_->Pop();
|
| - Result answer = frame_->CallStub(&stub, &operand);
|
| - frame_->Push(&answer);
|
| - break;
|
| + if (in_safe_int32_mode()) {
|
| + Visit(node->expression());
|
| + Result value = frame_->Pop();
|
| + ASSERT(value.is_untagged_int32());
|
| + // Registers containing an int32 value are not multiply used.
|
| + ASSERT(!value.is_register() || !frame_->is_used(value.reg()));
|
| + value.ToRegister();
|
| + switch (op) {
|
| + case Token::SUB: {
|
| + __ neg(value.reg());
|
| + if (node->no_negative_zero()) {
|
| + // -MIN_INT is MIN_INT with the overflow flag set.
|
| + unsafe_bailout_->Branch(overflow);
|
| + } else {
|
| + // MIN_INT and 0 both have bad negations; they are the only int32 values whose low 31 bits are all zero.
|
| + __ test(value.reg(), Immediate(0x7FFFFFFF));
|
| + unsafe_bailout_->Branch(zero);
|
| + }
|
| + break;
|
| + }
|
| + case Token::BIT_NOT: {
|
| + __ not_(value.reg());
|
| + break;
|
| + }
|
| + case Token::ADD: {
|
| + // Unary plus has no effect on int32 values.
|
| + break;
|
| + }
|
| + default:
|
| + UNREACHABLE();
|
| + break;
|
| }
|
| + frame_->Push(&value);
|
|
|
| - case Token::BIT_NOT: {
|
| - // Smi check.
|
| - JumpTarget smi_label;
|
| - JumpTarget continue_label;
|
| - Result operand = frame_->Pop();
|
| - operand.ToRegister();
|
| - __ test(operand.reg(), Immediate(kSmiTagMask));
|
| - smi_label.Branch(zero, &operand, taken);
|
| + } else {
|
| + Load(node->expression());
|
| + bool overwrite =
|
| + (node->expression()->AsBinaryOperation() != NULL &&
|
| + node->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
|
| + switch (op) {
|
| + case Token::SUB: {
|
| + GenericUnaryOpStub stub(Token::SUB, overwrite);
|
| + Result operand = frame_->Pop();
|
| + Result answer = frame_->CallStub(&stub, &operand);
|
| + frame_->Push(&answer);
|
| + break;
|
| + }
|
|
|
| - GenericUnaryOpStub stub(Token::BIT_NOT, overwrite);
|
| - Result answer = frame_->CallStub(&stub, &operand);
|
| - continue_label.Jump(&answer);
|
| + case Token::BIT_NOT: {
|
| + // Smi check.
|
| + JumpTarget smi_label;
|
| + JumpTarget continue_label;
|
| + Result operand = frame_->Pop();
|
| + operand.ToRegister();
|
| + __ test(operand.reg(), Immediate(kSmiTagMask));
|
| + smi_label.Branch(zero, &operand, taken);
|
|
|
| - smi_label.Bind(&answer);
|
| - answer.ToRegister();
|
| - frame_->Spill(answer.reg());
|
| - __ not_(answer.reg());
|
| - __ and_(answer.reg(), ~kSmiTagMask); // Remove inverted smi-tag.
|
| + GenericUnaryOpStub stub(Token::BIT_NOT, overwrite);
|
| + Result answer = frame_->CallStub(&stub, &operand);
|
| + continue_label.Jump(&answer);
|
|
|
| - continue_label.Bind(&answer);
|
| - frame_->Push(&answer);
|
| - break;
|
| - }
|
| + smi_label.Bind(&answer);
|
| + answer.ToRegister();
|
| + frame_->Spill(answer.reg());
|
| + __ not_(answer.reg());
|
| + __ and_(answer.reg(), ~kSmiTagMask); // Remove inverted smi-tag.
|
|
|
| - case Token::ADD: {
|
| - // Smi check.
|
| - JumpTarget continue_label;
|
| - Result operand = frame_->Pop();
|
| - operand.ToRegister();
|
| - __ test(operand.reg(), Immediate(kSmiTagMask));
|
| - continue_label.Branch(zero, &operand, taken);
|
| + continue_label.Bind(&answer);
|
| + frame_->Push(&answer);
|
| + break;
|
| + }
|
|
|
| - frame_->Push(&operand);
|
| - Result answer = frame_->InvokeBuiltin(Builtins::TO_NUMBER,
|
| + case Token::ADD: {
|
| + // Smi check.
|
| + JumpTarget continue_label;
|
| + Result operand = frame_->Pop();
|
| + operand.ToRegister();
|
| + __ test(operand.reg(), Immediate(kSmiTagMask));
|
| + continue_label.Branch(zero, &operand, taken);
|
| +
|
| + frame_->Push(&operand);
|
| + Result answer = frame_->InvokeBuiltin(Builtins::TO_NUMBER,
|
| CALL_FUNCTION, 1);
|
|
|
| - continue_label.Bind(&answer);
|
| - frame_->Push(&answer);
|
| - break;
|
| + continue_label.Bind(&answer);
|
| + frame_->Push(&answer);
|
| + break;
|
| + }
|
| +
|
| + default:
|
| + // NOT, DELETE, TYPEOF, and VOID are handled outside the
|
| + // switch.
|
| + UNREACHABLE();
|
| }
|
| -
|
| - default:
|
| - // NOT, DELETE, TYPEOF, and VOID are handled outside the
|
| - // switch.
|
| - UNREACHABLE();
|
| }
|
| }
|
| }
|
| @@ -6646,6 +6805,7 @@
|
|
|
|
|
| void CodeGenerator::VisitCountOperation(CountOperation* node) {
|
| + ASSERT(!in_safe_int32_mode());
|
| Comment cmnt(masm_, "[ CountOperation");
|
|
|
| bool is_postfix = node->is_postfix();
|
| @@ -6759,6 +6919,166 @@
|
| }
|
|
|
|
|
| +void CodeGenerator::Int32BinaryOperation(BinaryOperation* node) {
|
| + Token::Value op = node->op();
|
| + Comment cmnt(masm_, "[ Int32BinaryOperation");
|
| + ASSERT(in_safe_int32_mode());
|
| + ASSERT(safe_int32_mode_enabled());
|
| + ASSERT(FLAG_safe_int32_compiler);
|
| +
|
| + if (op == Token::COMMA) {
|
| + // Discard left value.
|
| + frame_->Nip(1);
|
| + return;
|
| + }
|
| +
|
| + Result right = frame_->Pop();
|
| + Result left = frame_->Pop();
|
| +
|
| + ASSERT(right.is_untagged_int32());
|
| + ASSERT(left.is_untagged_int32());
|
| + // Registers containing an int32 value are not multiply used.
|
| + ASSERT(!left.is_register() || !frame_->is_used(left.reg()));
|
| + ASSERT(!right.is_register() || !frame_->is_used(right.reg()));
|
| +
|
| + switch (op) {
|
| + case Token::COMMA:
|
| + case Token::OR:
|
| + case Token::AND:
|
| + UNREACHABLE();
|
| + break;
|
| + case Token::BIT_OR:
|
| + case Token::BIT_XOR:
|
| + case Token::BIT_AND:
|
| + left.ToRegister();
|
| + right.ToRegister();
|
| + if (op == Token::BIT_OR) {
|
| + __ or_(left.reg(), Operand(right.reg()));
|
| + } else if (op == Token::BIT_XOR) {
|
| + __ xor_(left.reg(), Operand(right.reg()));
|
| + } else {
|
| + ASSERT(op == Token::BIT_AND);
|
| + __ and_(left.reg(), Operand(right.reg()));
|
| + }
|
| + frame_->Push(&left);
|
| + right.Unuse();
|
| + break;
|
| + case Token::SAR:
|
| + case Token::SHL:
|
| + case Token::SHR: {
|
| + bool test_shr_overflow = false;
|
| + left.ToRegister();
|
| + if (right.is_constant()) {
|
| + ASSERT(right.handle()->IsSmi() || right.handle()->IsHeapNumber());
|
| + int shift_amount = NumberToInt32(*right.handle()) & 0x1F;
|
| + if (op == Token::SAR) {
|
| + __ sar(left.reg(), shift_amount);
|
| + } else if (op == Token::SHL) {
|
| + __ shl(left.reg(), shift_amount);
|
| + } else {
|
| + ASSERT(op == Token::SHR);
|
| + __ shr(left.reg(), shift_amount);
|
| + if (shift_amount == 0) test_shr_overflow = true;
|
| + }
|
| + } else {
|
| + // Move right to ecx
|
| + if (left.is_register() && left.reg().is(ecx)) {
|
| + right.ToRegister();
|
| + __ xchg(left.reg(), right.reg());
|
| + left = right; // Left is unused here, copy of right unused by Push.
|
| + } else {
|
| + right.ToRegister(ecx);
|
| + left.ToRegister();
|
| + }
|
| + if (op == Token::SAR) {
|
| + __ sar_cl(left.reg());
|
| + } else if (op == Token::SHL) {
|
| + __ shl_cl(left.reg());
|
| + } else {
|
| + ASSERT(op == Token::SHR);
|
| + __ shr_cl(left.reg());
|
| + test_shr_overflow = true;
|
| + }
|
| + }
|
| + {
|
| + Register left_reg = left.reg();
|
| + frame_->Push(&left);
|
| + right.Unuse();
|
| + if (test_shr_overflow && !node->to_int32()) {
|
| + // Uint32 results with top bit set are not Int32 values.
|
| + // If they will be forced to Int32, skip the test.
|
| + // Test is needed because shr with shift amount 0 does not set flags.
|
| + __ test(left_reg, Operand(left_reg));
|
| + unsafe_bailout_->Branch(sign);
|
| + }
|
| + }
|
| + break;
|
| + }
|
| + case Token::ADD:
|
| + case Token::SUB:
|
| + case Token::MUL:
|
| + left.ToRegister();
|
| + right.ToRegister();
|
| + if (op == Token::ADD) {
|
| + __ add(left.reg(), Operand(right.reg()));
|
| + } else if (op == Token::SUB) {
|
| + __ sub(left.reg(), Operand(right.reg()));
|
| + } else {
|
| + ASSERT(op == Token::MUL);
|
| + // We have statically verified that a negative zero can be ignored.
|
| + __ imul(left.reg(), Operand(right.reg()));
|
| + }
|
| + right.Unuse();
|
| + frame_->Push(&left);
|
| + if (!node->to_int32()) {
|
| + // Bail out on overflow -- unless the result of ADD, SUB, or MUL is
|
| + // immediately truncated by ToInt32, in which case wrapping is harmless.
|
| + unsafe_bailout_->Branch(overflow);
|
| + }
|
| + break;
|
| + case Token::DIV:
|
| + case Token::MOD: {
|
| + if (right.is_register() && (right.reg().is(eax) || right.reg().is(edx))) {
|
| + if (left.is_register() && left.reg().is(edi)) {
|
| + right.ToRegister(ebx);
|
| + } else {
|
| + right.ToRegister(edi);
|
| + }
|
| + }
|
| + left.ToRegister(eax);
|
| + Result edx_reg = allocator_->Allocate(edx);
|
| + right.ToRegister();
|
| + // The results are unused here because BreakTarget::Branch cannot handle
|
| + // live results.
|
| + Register right_reg = right.reg();
|
| + left.Unuse();
|
| + right.Unuse();
|
| + edx_reg.Unuse();
|
| + __ cmp(right_reg, 0);
|
| + // Ensure divisor is positive: no chance of non-int32 or -0 result.
|
| + unsafe_bailout_->Branch(less_equal);
|
| + __ cdq(); // Sign-extend eax into edx:eax
|
| + __ idiv(right_reg);
|
| + if (op == Token::MOD) {
|
| + Result edx_result(edx, NumberInfo::Integer32());
|
| + edx_result.set_untagged_int32(true);
|
| + frame_->Push(&edx_result);
|
| + } else {
|
| + ASSERT(op == Token::DIV);
|
| + __ test(edx, Operand(edx));
|
| + unsafe_bailout_->Branch(not_equal);
|
| + Result eax_result(eax, NumberInfo::Integer32());
|
| + eax_result.set_untagged_int32(true);
|
| + frame_->Push(&eax_result);
|
| + }
|
| + break;
|
| + }
|
| + default:
|
| + UNREACHABLE();
|
| + break;
|
| + }
|
| +}
|
| +
|
| void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
|
| Comment cmnt(masm_, "[ BinaryOperation");
|
| Token::Value op = node->op();
|
| @@ -6773,6 +7093,7 @@
|
| // is necessary because we assume that if we get control flow on the
|
| // last path out of an expression we got it on all paths.
|
| if (op == Token::AND) {
|
| + ASSERT(!in_safe_int32_mode());
|
| JumpTarget is_true;
|
| ControlDestination dest(&is_true, destination()->false_target(), true);
|
| LoadCondition(node->left(), &dest, false);
|
| @@ -6836,6 +7157,7 @@
|
| }
|
|
|
| } else if (op == Token::OR) {
|
| + ASSERT(!in_safe_int32_mode());
|
| JumpTarget is_false;
|
| ControlDestination dest(destination()->true_target(), &is_false, false);
|
| LoadCondition(node->left(), &dest, false);
|
| @@ -6897,6 +7219,10 @@
|
| exit.Bind();
|
| }
|
|
|
| + } else if (in_safe_int32_mode()) {
|
| + Visit(node->left());
|
| + Visit(node->right());
|
| + Int32BinaryOperation(node);
|
| } else {
|
| // NOTE: The code below assumes that the slow cases (calls to runtime)
|
| // never return a constant/immutable object.
|
| @@ -6925,11 +7251,13 @@
|
|
|
|
|
| void CodeGenerator::VisitThisFunction(ThisFunction* node) {
|
| + ASSERT(!in_safe_int32_mode());
|
| frame_->PushFunction();
|
| }
|
|
|
|
|
| void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
|
| + ASSERT(!in_safe_int32_mode());
|
| Comment cmnt(masm_, "[ CompareOperation");
|
|
|
| bool left_already_loaded = false;
|
|
|