Index: src/arm/codegen-arm.cc
===================================================================
--- src/arm/codegen-arm.cc      (revision 4770)
+++ src/arm/codegen-arm.cc      (working copy)
@@ -109,30 +109,41 @@
 CodeGenState::CodeGenState(CodeGenerator* owner)
     : owner_(owner),
-      true_target_(NULL),
-      false_target_(NULL),
-      previous_(NULL) {
-  owner_->set_state(this);
+      previous_(owner->state()) {
+  owner->set_state(this);
 }
-CodeGenState::CodeGenState(CodeGenerator* owner,
-                           JumpTarget* true_target,
-                           JumpTarget* false_target)
-    : owner_(owner),
+ConditionCodeGenState::ConditionCodeGenState(CodeGenerator* owner,
+                                             JumpTarget* true_target,
+                                             JumpTarget* false_target)
+    : CodeGenState(owner),
       true_target_(true_target),
-      false_target_(false_target),
-      previous_(owner->state()) {
-  owner_->set_state(this);
+      false_target_(false_target) {
+  owner->set_state(this);
 }
+TypeInfoCodeGenState::TypeInfoCodeGenState(CodeGenerator* owner,
+                                           Slot* slot,
+                                           TypeInfo type_info)
+    : CodeGenState(owner),
+      slot_(slot) {
+  owner->set_state(this);
+  old_type_info_ = owner->set_type_info(slot, type_info);
+}
+
+
 CodeGenState::~CodeGenState() {
   ASSERT(owner_->state() == this);
   owner_->set_state(previous_);
 }
+TypeInfoCodeGenState::~TypeInfoCodeGenState() {
+  owner()->set_type_info(slot_, old_type_info_);
+}
+
 // -------------------------------------------------------------------------
 // CodeGenerator implementation
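The refactoring above turns CodeGenState into a small RAII hierarchy: each constructor saves the owner's current state and installs itself, and the base destructor pops back to the previous state, so nested states unwind automatically at scope exit. A minimal sketch of the pattern with simplified, standalone types (not the actual V8 classes):

    #include <cassert>

    struct Generator;

    // Base scope: remembers the previous state and installs itself.
    struct State {
      explicit State(Generator* owner);
      virtual ~State();
      Generator* owner_;
      State* previous_;
    };

    struct Generator {
      State* state = nullptr;
    };

    State::State(Generator* owner) : owner_(owner), previous_(owner->state) {
      owner_->state = this;
    }

    State::~State() {
      assert(owner_->state == this);  // scopes must nest strictly
      owner_->state = previous_;      // pop back to the enclosing state
    }

ConditionCodeGenState and TypeInfoCodeGenState then only add their own payload on top of this; TypeInfoCodeGenState's destructor additionally restores the slot's old TypeInfo, as the hunk above shows.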
@@ -145,6 +156,7 @@
       cc_reg_(al),
       state_(NULL),
       loop_nesting_(0),
+      type_info_(NULL),
       function_return_is_shadowed_(false) {
 }
@@ -162,6 +174,11 @@
   // Initialize state.
   info_ = info;
+
+  int slots = scope()->num_parameters() + scope()->num_stack_slots();
+  ScopedVector<TypeInfo> type_info_array(slots);
+  type_info_ = &type_info_array;
+
   ASSERT(allocator_ == NULL);
   RegisterAllocator register_allocator(this);
   allocator_ = &register_allocator;
@@ -393,9 +410,24 @@
   }
   allocator_ = NULL;
+  type_info_ = NULL;
 }
+int CodeGenerator::NumberOfSlot(Slot* slot) {
+  if (slot == NULL) return kInvalidSlotNumber;
+  switch (slot->type()) {
+    case Slot::PARAMETER:
+      return slot->index();
+    case Slot::LOCAL:
+      return slot->index() + scope()->num_parameters();
+    default:
+      break;
+  }
+  return kInvalidSlotNumber;
+}
+
+
 MemOperand CodeGenerator::SlotOperand(Slot* slot, Register tmp) {
   // Currently, this assertion will fail if we try to assign to
   // a constant variable that is constant because it is read-only
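NumberOfSlot flattens parameters and stack locals into a single index space so the ScopedVector<TypeInfo> allocated above (num_parameters + num_stack_slots entries) can be indexed directly: parameters come first, then locals offset by the parameter count. A hypothetical standalone illustration of the mapping (helper name invented for the example):

    // Sketch: with 2 parameters and 3 stack locals the TypeInfo table has
    // 2 + 3 = 5 entries:
    //   indices 0..1 -> PARAMETER slots 0..1
    //   indices 2..4 -> LOCAL slots 0..2
    int SlotNumber(int slot_index, bool is_local, int num_parameters) {
      return is_local ? num_parameters + slot_index : slot_index;
    }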
@@ -490,7 +522,7 @@
   ASSERT(!has_cc());
   int original_height = frame_->height();
-  { CodeGenState new_state(this, true_target, false_target);
+  { ConditionCodeGenState new_state(this, true_target, false_target);
     Visit(x);
     // If we hit a stack overflow, we may not have actually visited
@@ -789,50 +821,9 @@
 }
-void CodeGenerator::GenericBinaryOperation(Token::Value op,
-                                           OverwriteMode overwrite_mode,
-                                           int constant_rhs) {
-  VirtualFrame::SpilledScope spilled_scope(frame_);
-  // sp[0] : y
-  // sp[1] : x
-  // result : r0
-
-  // Stub is entered with a call: 'return address' is in lr.
-  switch (op) {
-    case Token::ADD:
-    case Token::SUB:
-    case Token::MUL:
-    case Token::DIV:
-    case Token::MOD:
-    case Token::BIT_OR:
-    case Token::BIT_AND:
-    case Token::BIT_XOR:
-    case Token::SHL:
-    case Token::SHR:
-    case Token::SAR: {
-      frame_->EmitPop(r0);  // r0 : y
-      frame_->EmitPop(r1);  // r1 : x
-      GenericBinaryOpStub stub(op, overwrite_mode, r1, r0, constant_rhs);
-      frame_->CallStub(&stub, 0);
-      break;
-    }
-
-    case Token::COMMA:
-      frame_->EmitPop(r0);
-      // Simply discard left value.
-      frame_->Drop();
-      break;
-
-    default:
-      // Other cases should have been handled before this point.
-      UNREACHABLE();
-      break;
-  }
-}
-
-
 void CodeGenerator::VirtualFrameBinaryOperation(Token::Value op,
Søren Thygesen Gjesse, 2010/06/02 09:24:01:
  VirtualFrameBinaryOperation -> GenericBinaryOperation
                                                 OverwriteMode overwrite_mode,
+                                                GenerateInlineSmi inline_smi,
                                                 int constant_rhs) {
   // top of virtual frame: y
   // 2nd elt. on virtual frame : x
@@ -840,14 +831,84 @@
   // Stub is entered with a call: 'return address' is in lr.
   switch (op) {
-    case Token::ADD:  // fall through.
-    case Token::SUB:  // fall through.
+    case Token::ADD:
+    case Token::SUB:
+      if (inline_smi) {
+        JumpTarget done;
+        Register rhs = frame_->PopToRegister();
+        Register lhs = frame_->PopToRegister(rhs);  // Don't pop to rhs register.
+        Register scratch = VirtualFrame::scratch0();
+        __ orr(scratch, rhs, Operand(lhs));
+        // Check they are both small and positive.
+        __ tst(scratch, Operand(kSmiTagMask | 0xc0000000));
+        ASSERT(rhs.is(r0) || lhs.is(r0));  // r0 is free now.
+        ASSERT_EQ(0, kSmiTag);
+        if (op == Token::ADD) {
+          __ add(r0, lhs, Operand(rhs), LeaveCC, eq);
+        } else {
+          __ sub(r0, lhs, Operand(rhs), LeaveCC, eq);
+        }
+        done.Branch(eq);
+        GenericBinaryOpStub stub(op, overwrite_mode, lhs, rhs, constant_rhs);
+        frame_->SpillAll();
+        frame_->CallStub(&stub, 0);
+        done.Bind();
+        frame_->EmitPush(r0);
+        break;
+      } else {
+        // Fall through!
+      }
+    case Token::BIT_OR:
+    case Token::BIT_AND:
+    case Token::BIT_XOR:
+      if (inline_smi) {
+        bool rhs_is_smi = frame_->KnownSmiAt(0);
+        bool lhs_is_smi = frame_->KnownSmiAt(1);
+        Register rhs = frame_->PopToRegister();
+        Register lhs = frame_->PopToRegister(rhs);  // Don't pop to rhs register.
Søren Thygesen Gjesse, 2010/06/02 09:24:01:
  Long line.
+        Register smi_test_reg;
+        Condition cond;
+        if (!rhs_is_smi || !lhs_is_smi) {
+          if (rhs_is_smi) {
+            smi_test_reg = lhs;
+          } else if (lhs_is_smi) {
+            smi_test_reg = rhs;
+          } else {
+            smi_test_reg = VirtualFrame::scratch0();
+            __ orr(smi_test_reg, rhs, Operand(lhs));
+          }
+          // Check they are both Smis.
+          __ tst(smi_test_reg, Operand(kSmiTagMask));
+          cond = eq;
+        } else {
+          cond = al;
+        }
+        ASSERT(rhs.is(r0) || lhs.is(r0));  // r0 is free now.
+        if (op == Token::BIT_OR) {
+          __ orr(r0, lhs, Operand(rhs), LeaveCC, cond);
+        } else if (op == Token::BIT_AND) {
+          __ and_(r0, lhs, Operand(rhs), LeaveCC, cond);
+        } else {
+          ASSERT(op == Token::BIT_XOR);
+          ASSERT_EQ(0, kSmiTag);
+          __ eor(r0, lhs, Operand(rhs), LeaveCC, cond);
+        }
+        if (cond != al) {
+          JumpTarget done;
+          done.Branch(cond);
+          GenericBinaryOpStub stub(op, overwrite_mode, lhs, rhs, constant_rhs);
+          frame_->SpillAll();
+          frame_->CallStub(&stub, 0);
+          done.Bind();
+        }
+        frame_->EmitPush(r0);
+        break;
+      } else {
+        // Fall through!
+      }
     case Token::MUL:
     case Token::DIV:
     case Token::MOD:
-    case Token::BIT_OR:
-    case Token::BIT_AND:
-    case Token::BIT_XOR:
     case Token::SHL:
     case Token::SHR:
     case Token::SAR: {
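The fast paths above lean on V8's smi tagging: on 32-bit ARM a smi stores its value in the upper 31 bits with a zero tag bit (kSmiTag == 0, kSmiTagMask == 1), so one OR and one TST classify both operands at once. A small standalone sketch of the two checks, under those tagging assumptions:

    #include <cstdint>

    const uint32_t kSmiTagMask = 1;

    // Both operands are smis iff the OR of the two words has a clear tag bit.
    bool BothAreSmis(uint32_t lhs, uint32_t rhs) {
      return ((lhs | rhs) & kSmiTagMask) == 0;
    }

    // The ADD/SUB path also ORs in 0xc0000000: requiring the top two bits
    // clear in both operands rejects negative smis and large positive smis,
    // so the inline add/sub result is guaranteed to stay inside the 31-bit
    // smi range and no overflow check is needed on the fast path.
    bool BothAreSmallPositiveSmis(uint32_t lhs, uint32_t rhs) {
      return ((lhs | rhs) & (kSmiTagMask | 0xc0000000u)) == 0;
    }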
@@ -972,7 +1033,8 @@
         rhs = r1;
       }
     } else {
-      UNREACHABLE();  // Should have been handled in SmiOperation.
+      ASSERT(op_ == Token::SHL);
+      __ mov(r1, Operand(Smi::FromInt(value_)));
     }
     break;
   }
@@ -1020,6 +1082,8 @@
                                OverwriteMode mode) {
   int int_value = Smi::cast(*value)->value();
+  bool both_sides_are_smi = frame_->KnownSmiAt(0);
+
   bool something_to_inline;
   switch (op) {
     case Token::ADD:
@@ -1030,7 +1094,10 @@
       something_to_inline = true;
       break;
     }
-    case Token::SHL:
+    case Token::SHL: {
+      something_to_inline = (both_sides_are_smi || !reversed);
+      break;
+    }
     case Token::SHR:
     case Token::SAR: {
       if (reversed) {
@@ -1067,17 +1134,18 @@
     // Push the rhs onto the virtual frame by putting it in a TOS register.
     Register rhs = frame_->GetTOSRegister();
     __ mov(rhs, Operand(value));
-    frame_->EmitPush(rhs);
-    VirtualFrameBinaryOperation(op, mode, int_value);
+    frame_->EmitPush(rhs, TypeInfo::Smi());
+    VirtualFrameBinaryOperation(op, mode, GENERATE_INLINE_SMI, int_value);
   } else {
     // Pop the rhs, then push lhs and rhs in the right order.  Only performs
     // at most one pop, the rest takes place in TOS registers.
     Register lhs = frame_->GetTOSRegister();  // Get reg for pushing.
     Register rhs = frame_->PopToRegister(lhs);  // Don't use lhs for this.
     __ mov(lhs, Operand(value));
-    frame_->EmitPush(lhs);
-    frame_->EmitPush(rhs);
-    VirtualFrameBinaryOperation(op, mode, kUnknownIntValue);
+    frame_->EmitPush(lhs, TypeInfo::Smi());
+    TypeInfo t = both_sides_are_smi ? TypeInfo::Smi() : TypeInfo::Unknown();
+    frame_->EmitPush(rhs, t);
+    VirtualFrameBinaryOperation(op, mode, GENERATE_INLINE_SMI, kUnknownIntValue);
Søren Thygesen Gjesse, 2010/06/02 09:24:01:
  Long line.
   }
   return;
 }
@@ -1097,8 +1165,10 @@
       __ add(tos, tos, Operand(value), SetCC);
       deferred->Branch(vs);
-      __ tst(tos, Operand(kSmiTagMask));
-      deferred->Branch(ne);
+      if (!both_sides_are_smi) {
+        __ tst(tos, Operand(kSmiTagMask));
+        deferred->Branch(ne);
+      }
       deferred->BindExit();
       frame_->EmitPush(tos);
       break;
@@ -1114,8 +1184,10 @@
         __ sub(tos, tos, Operand(value), SetCC);
       }
       deferred->Branch(vs);
-      __ tst(tos, Operand(kSmiTagMask));
-      deferred->Branch(ne);
+      if (!both_sides_are_smi) {
+        __ tst(tos, Operand(kSmiTagMask));
+        deferred->Branch(ne);
+      }
       deferred->BindExit();
       frame_->EmitPush(tos);
       break;
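The ADD/SUB paths above are "optimistic": they perform the tagged add or subtract first with SetCC, branch to deferred code on signed overflow (the vs condition), and only then check the smi tag when the operand type is unknown. A portable sketch of the same control flow (helper name and std::optional signalling are inventions of this example):

    #include <cstdint>
    #include <optional>

    const uint32_t kSmiTagMask = 1;

    // Optimistic smi addition: add the tagged words first, then validate.
    // Returns nullopt when the deferred (slow) path would be taken.
    std::optional<uint32_t> SmiAddConstant(uint32_t tos,
                                           uint32_t tagged_constant,
                                           bool known_smi) {
      uint32_t sum = tos + tagged_constant;
      // Signed overflow <=> operands share a sign the sum does not
      // (this is what the ARM V flag reports after 'add ... SetCC').
      bool overflow = (~(tos ^ tagged_constant) & (tos ^ sum)) >> 31;
      if (overflow) return std::nullopt;             // deferred->Branch(vs)
      if (!known_smi && (tos & kSmiTagMask) != 0) {  // smi check only if needed
        return std::nullopt;                         // deferred->Branch(ne)
      }
      return sum;  // still correctly tagged: the zero tag bits add to zero
    }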
@@ -1125,25 +1197,65 @@
     case Token::BIT_OR:
     case Token::BIT_XOR:
     case Token::BIT_AND: {
-      DeferredCode* deferred =
-          new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);
-      __ tst(tos, Operand(kSmiTagMask));
-      deferred->Branch(ne);
-      switch (op) {
-        case Token::BIT_OR:  __ orr(tos, tos, Operand(value)); break;
-        case Token::BIT_XOR: __ eor(tos, tos, Operand(value)); break;
-        case Token::BIT_AND: __ and_(tos, tos, Operand(value)); break;
-        default: UNREACHABLE();
+      if (both_sides_are_smi) {
+        switch (op) {
+          case Token::BIT_OR:  __ orr(tos, tos, Operand(value)); break;
+          case Token::BIT_XOR: __ eor(tos, tos, Operand(value)); break;
+          case Token::BIT_AND: __ and_(tos, tos, Operand(value)); break;
+          default: UNREACHABLE();
+        }
+        frame_->EmitPush(tos, TypeInfo::Smi());
+      } else {
+        DeferredCode* deferred =
+            new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);
+        __ tst(tos, Operand(kSmiTagMask));
+        deferred->Branch(ne);
+        switch (op) {
+          case Token::BIT_OR:  __ orr(tos, tos, Operand(value)); break;
+          case Token::BIT_XOR: __ eor(tos, tos, Operand(value)); break;
+          case Token::BIT_AND: __ and_(tos, tos, Operand(value)); break;
+          default: UNREACHABLE();
+        }
+        deferred->BindExit();
+        TypeInfo result_type =
+            (op == Token::BIT_AND) ? TypeInfo::Smi() : TypeInfo::Integer32();
+        frame_->EmitPush(tos, result_type);
       }
-      deferred->BindExit();
-      frame_->EmitPush(tos);
       break;
     }
     case Token::SHL:
+      if (reversed) {
+        ASSERT(both_sides_are_smi);
+        int max_shift = 0;
+        int max_result = int_value == 0 ? 1 : int_value;
+        while (Smi::IsValid(max_result << 1)) {
+          max_shift++;
+          max_result <<= 1;
+        }
+        DeferredCode* deferred =
+            new DeferredInlineSmiOperation(op, int_value, true, mode, tos);
+        // Mask off the last 5 bits of the shift operand (rhs).  This is part
+        // of the definition of shift in JS and we know we have a Smi so we
+        // can safely do this.  The masked version gets passed to the
+        // deferred code, but that makes no difference.
+        __ and_(tos, tos, Operand(Smi::FromInt(0x1f)));
+        __ cmp(tos, Operand(Smi::FromInt(max_shift)));
+        deferred->Branch(ge);
+        Register scratch = VirtualFrame::scratch0();
+        __ mov(scratch, Operand(tos, ASR, kSmiTagSize));  // Untag.
+        __ mov(tos, Operand(Smi::FromInt(int_value)));    // Load constant.
+        __ mov(tos, Operand(tos, LSL, scratch));          // Shift constant.
+        deferred->BindExit();
+        TypeInfo result = TypeInfo::Integer32();
+        frame_->EmitPush(tos, result);
+        break;
+      }
+      // Fall through!
     case Token::SHR:
     case Token::SAR: {
       ASSERT(!reversed);
+      TypeInfo result = TypeInfo::Integer32();
       Register scratch = VirtualFrame::scratch0();
       Register scratch2 = VirtualFrame::scratch1();
       int shift_value = int_value & 0x1f;  // least significant 5 bits
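The reversed SHL case above (constant << variable) precomputes the largest shift for which the constant still fits in a smi and bails to deferred code for anything bigger. A worked example of the max_shift loop, assuming the 31-bit smi payload of 32-bit V8:

    #include <cstdint>

    // Smis on 32-bit V8 hold 31-bit signed values.
    bool SmiIsValid(int64_t v) { return v >= -(1 << 30) && v < (1 << 30); }

    // Largest shift such that (constant << shift) is still a valid smi;
    // mirrors the max_shift loop in the patch (positive constants shown).
    int MaxShiftFor(int constant) {
      int max_shift = 0;
      int64_t max_result = constant == 0 ? 1 : constant;
      while (SmiIsValid(max_result << 1)) {
        max_shift++;
        max_result <<= 1;
      }
      return max_shift;
    }
    // E.g. MaxShiftFor(3) == 28: 3 << 28 < 2^30, but 3 << 29 is not a smi.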
@@ -1151,9 +1263,15 @@
           new DeferredInlineSmiOperation(op, shift_value, false, mode, tos);
       uint32_t problematic_mask = kSmiTagMask;
       // For unsigned shift by zero all negative smis are problematic.
-      if (shift_value == 0 && op == Token::SHR) problematic_mask |= 0x80000000;
-      __ tst(tos, Operand(problematic_mask));
-      deferred->Branch(ne);  // Go slow for problematic input.
+      bool skip_smi_test = both_sides_are_smi;
+      if (shift_value == 0 && op == Token::SHR) {
+        problematic_mask |= 0x80000000;
+        skip_smi_test = false;
+      }
+      if (!skip_smi_test) {
+        __ tst(tos, Operand(problematic_mask));
+        deferred->Branch(ne);  // Go slow for problematic input.
+      }
       switch (op) {
         case Token::SHL: {
           if (shift_value != 0) {
@@ -1188,6 +1306,9 @@
               // by 0 or 1 when handed a valid smi
               __ tst(scratch, Operand(0xc0000000));
               deferred->Branch(ne);
+            } else {
+              ASSERT(shift_value >= 2);
+              result = TypeInfo::Smi();  // SHR by at least 2 gives a Smi.
             }
             __ mov(tos, Operand(scratch, LSL, kSmiTagSize));
           }
@@ -1204,13 +1325,15 @@
             __ mov(tos, Operand(tos, ASR, (kSmiTagSize + shift_value) & 0x1f));
             // Put tag back.
             __ mov(tos, Operand(tos, LSL, kSmiTagSize));
+            // SAR by at least 1 gives a Smi.
+            result = TypeInfo::Smi();
           }
           break;
         }
         default: UNREACHABLE();
       }
       deferred->BindExit();
-      frame_->EmitPush(tos);
+      frame_->EmitPush(tos, result);
       break;
     }
@@ -1219,21 +1342,24 @@
       ASSERT(int_value >= 2);
       ASSERT(IsPowerOf2(int_value));
       DeferredCode* deferred =
-          new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);
+        new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);
       unsigned mask = (0x80000000u | kSmiTagMask);
       __ tst(tos, Operand(mask));
       deferred->Branch(ne);  // Go to deferred code on non-Smis and negative.
       mask = (int_value << kSmiTagSize) - 1;
       __ and_(tos, tos, Operand(mask));
       deferred->BindExit();
-      frame_->EmitPush(tos);
+      // Mod of positive power of 2 Smi gives a Smi if the lhs is an integer.
+      frame_->EmitPush(
+          tos,
+          both_sides_are_smi ? TypeInfo::Smi() : TypeInfo::Number());
       break;
     }
     case Token::MUL: {
       ASSERT(IsEasyToMultiplyBy(int_value));
       DeferredCode* deferred =
-          new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);
+        new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);
       unsigned max_smi_that_wont_overflow = Smi::kMaxValue / int_value;
       max_smi_that_wont_overflow <<= kSmiTagSize;
       unsigned mask = 0x80000000u;
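The MOD case above exploits two facts: for a non-negative value, x % 2^k is a single AND with 2^k - 1, and applying that mask to the tagged word directly yields a correctly tagged result, because the mask also keeps the (zero) tag bit. A small worked example under the same tagging assumptions as earlier:

    #include <cassert>
    #include <cstdint>

    const int kSmiTagSize = 1;  // tag is the low bit; a smi is value << 1

    uint32_t SmiTag(int v) { return static_cast<uint32_t>(v) << kSmiTagSize; }

    // x mod 8 on a tagged smi: mask = (8 << 1) - 1 = 0xF keeps the low three
    // value bits and the zero tag bit, so the result is already tagged.
    uint32_t TaggedModPowerOf2(uint32_t tagged_x, int power_of_2) {
      uint32_t mask = (power_of_2 << kSmiTagSize) - 1;
      return tagged_x & mask;
    }

    int main() {
      assert(TaggedModPowerOf2(SmiTag(13), 8) == SmiTag(5));  // 13 % 8 == 5
      assert(TaggedModPowerOf2(SmiTag(16), 8) == SmiTag(0));
    }

The preceding tst against 0x80000000 | kSmiTagMask is what makes this safe: negative values and non-smis are sent to the deferred code first.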
@@ -1279,45 +1405,66 @@
   Register lhs;
   Register rhs;
+  bool lhs_is_smi;
+  bool rhs_is_smi;
+
   // We load the top two stack positions into registers chosen by the virtual
   // frame.  This should keep the register shuffling to a minimum.
   // Implement '>' and '<=' by reversal to obtain ECMA-262 conversion order.
   if (cc == gt || cc == le) {
     cc = ReverseCondition(cc);
+    lhs_is_smi = frame_->KnownSmiAt(0);
+    rhs_is_smi = frame_->KnownSmiAt(1);
     lhs = frame_->PopToRegister();
     rhs = frame_->PopToRegister(lhs);  // Don't pop to the same register again!
   } else {
+    rhs_is_smi = frame_->KnownSmiAt(0);
+    lhs_is_smi = frame_->KnownSmiAt(1);
     rhs = frame_->PopToRegister();
     lhs = frame_->PopToRegister(rhs);  // Don't pop to the same register again!
   }
+  bool both_sides_are_smi = (lhs_is_smi && rhs_is_smi);
+
   ASSERT(rhs.is(r0) || rhs.is(r1));
   ASSERT(lhs.is(r0) || lhs.is(r1));
-  // Now we have the two sides in r0 and r1.  We flush any other registers
-  // because the stub doesn't know about register allocation.
-  frame_->SpillAll();
-  Register scratch = VirtualFrame::scratch0();
-  __ orr(scratch, lhs, Operand(rhs));
-  __ tst(scratch, Operand(kSmiTagMask));
-  JumpTarget smi;
-  smi.Branch(eq);
+  JumpTarget exit;
-  // Perform non-smi comparison by stub.
-  // CompareStub takes arguments in r0 and r1, returns <0, >0 or 0 in r0.
-  // We call with 0 args because there are 0 on the stack.
-  if (!rhs.is(r0)) {
-    __ Swap(rhs, lhs, ip);
+  if (!both_sides_are_smi) {
+    // Now we have the two sides in r0 and r1.  We flush any other registers
+    // because the stub doesn't know about register allocation.
+    frame_->SpillAll();
+    Register scratch = VirtualFrame::scratch0();
+    Register smi_test_reg;
+    if (lhs_is_smi) {
+      smi_test_reg = rhs;
+    } else if (rhs_is_smi) {
+      smi_test_reg = lhs;
+    } else {
+      __ orr(scratch, lhs, Operand(rhs));
+      smi_test_reg = scratch;
+    }
+    __ tst(smi_test_reg, Operand(kSmiTagMask));
+    JumpTarget smi;
+    smi.Branch(eq);
+
+    // Perform non-smi comparison by stub.
+    // CompareStub takes arguments in r0 and r1, returns <0, >0 or 0 in r0.
+    // We call with 0 args because there are 0 on the stack.
+    if (!rhs.is(r0)) {
+      __ Swap(rhs, lhs, ip);
+    }
+
+    CompareStub stub(cc, strict);
+    frame_->CallStub(&stub, 0);
+    __ cmp(r0, Operand(0));
+    exit.Jump();
+
+    smi.Bind();
   }
-  CompareStub stub(cc, strict);
-  frame_->CallStub(&stub, 0);
-  __ cmp(r0, Operand(0));
-  JumpTarget exit;
-  exit.Jump();
-
   // Do smi comparisons by pointer comparison.
-  smi.Bind();
   __ cmp(lhs, Operand(rhs));
   exit.Bind();
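The "pointer comparison" at the end works because tagging is a left shift by one, which is strictly monotonic: two smis compare the same whether tagged or untagged, so no untagging is needed. A tiny sketch of that invariant (standalone, assuming the 32-bit tagging used above):

    #include <cassert>
    #include <cstdint>

    // Tagging v -> v << 1 preserves signed order.
    int32_t SmiTag(int32_t v) {
      return static_cast<int32_t>(static_cast<uint32_t>(v) << 1);
    }

    int main() {
      assert((SmiTag(-3) < SmiTag(7)) == (-3 < 7));
      assert((SmiTag(100) < SmiTag(2)) == (100 < 2));
    }

When both sides are statically known smis, the patch therefore skips the stub call entirely and emits only the single cmp.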
@@ -2090,6 +2237,17 @@
   node->break_target()->SetExpectedHeight();
   IncrementLoopNesting();
+  // We know that the loop index is a smi if it is not modified in the
+  // loop body and it is checked against a constant limit in the loop
+  // condition.  In this case, we reset the static type information of the
+  // loop index to smi before compiling the body, the update expression, and
+  // the bottom check of the loop condition.
+  TypeInfoCodeGenState type_info_scope(this,
+                                       node->is_fast_smi_loop() ?
+                                           node->loop_variable()->slot() :
+                                           NULL,
+                                       TypeInfo::Smi());
+
   // If there is no update statement, label the top of the loop with the
   // continue target, otherwise with the loop target.
   JumpTarget loop(JumpTarget::BIDIRECTIONAL);
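A sketch of the fast-smi-loop condition the comment above describes, with invented field names standing in for the real AST analysis (the actual predicate lives behind node->is_fast_smi_loop()):

    // Hypothetical summary of a 'for' loop, e.g. for (var i = 0; i < 100; i++).
    struct ForLoopInfo {
      bool index_modified_in_body;       // 'i' assigned inside the body?
      bool condition_is_constant_limit;  // condition like 'i < 100'?
    };

    bool IsFastSmiLoop(const ForLoopInfo& loop) {
      // The index only changes via the update expression and is bounded by
      // a constant, so its type can be pinned to Smi while compiling the
      // body, the update expression, and the bottom condition check.
      return !loop.index_modified_in_body && loop.condition_is_constant_limit;
    }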
@@ -2810,7 +2968,8 @@
   } else {
     Register scratch = VirtualFrame::scratch0();
-    frame_->EmitPush(SlotOperand(slot, scratch));
+    TypeInfo info = type_info(slot);
+    frame_->EmitPush(SlotOperand(slot, scratch), info);
     if (slot->var()->mode() == Variable::CONST) {
       // Const slots may contain 'the hole' value (the constant hasn't been
       // initialized yet) which needs to be converted into the 'undefined'
@@ -3100,8 +3259,9 @@
 #endif
   Comment cmnt(masm_, "[ Literal");
   Register reg = frame_->GetTOSRegister();
+  bool is_smi = node->handle()->IsSmi();
   __ mov(reg, Operand(node->handle()));
-  frame_->EmitPush(reg);
+  frame_->EmitPush(reg, is_smi ? TypeInfo::Smi() : TypeInfo::Unknown());
   ASSERT_EQ(original_height + 1, frame_->height());
 }
@@ -3332,9 +3492,14 @@
                      false,
                      overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
       } else {
+        GenerateInlineSmi inline_smi =
+            loop_nesting() > 0 ? GENERATE_INLINE_SMI : DONT_GENERATE_INLINE_SMI;
+        if (literal != NULL) inline_smi = DONT_GENERATE_INLINE_SMI;
Søren Thygesen Gjesse, 2010/06/02 09:24:01:
  Maybe assert !literal.IsSmi() when it is != NULL (
         Load(node->value());
         VirtualFrameBinaryOperation(
-            node->binary_op(), overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
+            node->binary_op(),
+            overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE,
+            inline_smi);
       }
     } else {
       Load(node->value());
@@ -3425,9 +3590,14 @@
                      false,
                      overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
       } else {
+        GenerateInlineSmi inline_smi =
+            loop_nesting() > 0 ? GENERATE_INLINE_SMI : DONT_GENERATE_INLINE_SMI;
+        if (literal != NULL) inline_smi = DONT_GENERATE_INLINE_SMI;
         Load(node->value());
         VirtualFrameBinaryOperation(
-            node->binary_op(), overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
+            node->binary_op(),
+            overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE,
+            inline_smi);
       }
     } else {
       // For non-compound assignment just load the right-hand side.
@@ -3532,9 +3702,14 @@
                      false,
                      overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
       } else {
+        GenerateInlineSmi inline_smi =
+            loop_nesting() > 0 ? GENERATE_INLINE_SMI : DONT_GENERATE_INLINE_SMI;
+        if (literal != NULL) inline_smi = DONT_GENERATE_INLINE_SMI;
         Load(node->value());
         VirtualFrameBinaryOperation(
-            node->binary_op(), overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
+            node->binary_op(),
+            overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE,
+            inline_smi);
       }
     } else {
       // For non-compound assignment just load the right-hand side.
@@ -5086,9 +5261,36 @@
   Variable* var = node->expression()->AsVariableProxy()->AsVariable();
   bool is_const = (var != NULL && var->mode() == Variable::CONST);
+  bool is_slot = (var != NULL && var->mode() == Variable::VAR);
-  if (is_postfix) {
+  if (!is_const && is_slot && type_info(var->slot()).IsSmi()) {
+    // The type info declares that this variable is always a Smi.  That
+    // means it is a Smi both before and after the increment/decrement.
+    // Lets make use of that to make a very minimal count.
+    Reference target(this, node->expression(), !is_const);
+    ASSERT(!target.is_illegal());
+    target.GetValue();  // Pushes the value.
+    Register value = frame_->PopToRegister();
+    if (is_postfix) frame_->EmitPush(value);
+    if (is_increment) {
+      __ add(value, value, Operand(Smi::FromInt(1)));
+    } else {
+      __ sub(value, value, Operand(Smi::FromInt(1)));
+    }
+    frame_->EmitPush(value);
+    target.SetValue(NOT_CONST_INIT);
+    if (is_postfix) frame_->Pop();
+    ASSERT_EQ(original_height + 1, frame_->height());
+    return;
+  }
+
+  // If it's a postfix expression and its result is not ignored and the
+  // reference is non-trivial, then push a placeholder on the stack now
+  // to hold the result of the expression.
+  bool placeholder_pushed = false;
+  if (!is_slot && is_postfix) {
     frame_->EmitPush(Operand(Smi::FromInt(0)));
+    placeholder_pushed = true;
   }
   // A constant reference is not saved to, so a constant reference is not a
@@ -5097,12 +5299,11 @@
   if (target.is_illegal()) {
     // Spoof the virtual frame to have the expected height (one higher
     // than on entry).
-    if (!is_postfix) {
-      frame_->EmitPush(Operand(Smi::FromInt(0)));
-    }
+    if (!placeholder_pushed) frame_->EmitPush(Operand(Smi::FromInt(0)));
     ASSERT_EQ(original_height + 1, frame_->height());
     return;
   }
+
   // This pushes 0, 1 or 2 words on the object to be used later when updating
   // the target.  It also pushes the current value of the target.
   target.GetValue();
@@ -5110,16 +5311,21 @@
   JumpTarget slow;
   JumpTarget exit;
-  // Check for smi operand.
   Register value = frame_->PopToRegister();
-  __ tst(value, Operand(kSmiTagMask));
-  slow.Branch(ne);
   // Postfix: Store the old value as the result.
-  if (is_postfix) {
+  if (placeholder_pushed) {
     frame_->SetElementAt(value, target.size());
+  } else if (is_postfix) {
+    frame_->EmitPush(value);
+    __ mov(VirtualFrame::scratch0(), value);
+    value = VirtualFrame::scratch0();
   }
+
+  // Check for smi operand.
+  __ tst(value, Operand(kSmiTagMask));
+  slow.Branch(ne);
+
   // Perform optimistic increment/decrement.
   if (is_increment) {
@@ -5300,18 +5506,24 @@
   if (rliteral != NULL && rliteral->handle()->IsSmi()) {
     VirtualFrame::RegisterAllocationScope scope(this);
     Load(node->left());
+    if (frame_->KnownSmiAt(0)) overwrite_left = false;
     SmiOperation(node->op(),
                  rliteral->handle(),
                  false,
-                 overwrite_right ? OVERWRITE_RIGHT : NO_OVERWRITE);
+                 overwrite_left ? OVERWRITE_LEFT : NO_OVERWRITE);
   } else if (lliteral != NULL && lliteral->handle()->IsSmi()) {
     VirtualFrame::RegisterAllocationScope scope(this);
     Load(node->right());
+    if (frame_->KnownSmiAt(0)) overwrite_right = false;
     SmiOperation(node->op(),
                  lliteral->handle(),
                  true,
-                 overwrite_left ? OVERWRITE_LEFT : NO_OVERWRITE);
+                 overwrite_right ? OVERWRITE_RIGHT : NO_OVERWRITE);
   } else {
+    GenerateInlineSmi inline_smi =
+        loop_nesting() > 0 ? GENERATE_INLINE_SMI : DONT_GENERATE_INLINE_SMI;
+    if (lliteral != NULL) inline_smi = DONT_GENERATE_INLINE_SMI;
+    if (rliteral != NULL) inline_smi = DONT_GENERATE_INLINE_SMI;
     VirtualFrame::RegisterAllocationScope scope(this);
     OverwriteMode overwrite_mode = NO_OVERWRITE;
     if (overwrite_left) {
@@ -5321,7 +5533,7 @@
     }
     Load(node->left());
     Load(node->right());
-    VirtualFrameBinaryOperation(node->op(), overwrite_mode);
+    VirtualFrameBinaryOperation(node->op(), overwrite_mode, inline_smi);
   }
 }
 ASSERT(!has_valid_frame() ||
@@ -5813,6 +6025,7 @@
                              frame_->scratch0(), frame_->scratch1());
   // Load the key and receiver from the stack.
+  bool key_is_known_smi = frame_->KnownSmiAt(0);
   Register key = frame_->PopToRegister();
   Register receiver = frame_->PopToRegister(key);
   VirtualFrame::SpilledScope spilled(frame_);
@@ -5835,18 +6048,21 @@
   // Check the map. The null map used below is patched by the inline cache
   // code.
   __ ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
+
+  // Check that the key is a smi.
+  if (!key_is_known_smi) {
+    __ tst(key, Operand(kSmiTagMask));
+    deferred->Branch(ne);
+  }
+
 #ifdef DEBUG
-    Label check_inlined_codesize;
-    masm_->bind(&check_inlined_codesize);
+  Label check_inlined_codesize;
+  masm_->bind(&check_inlined_codesize);
 #endif
   __ mov(scratch2, Operand(Factory::null_value()));
   __ cmp(scratch1, scratch2);
   deferred->Branch(ne);
-  // Check that the key is a smi.
-  __ tst(key, Operand(kSmiTagMask));
-  deferred->Branch(ne);
-
   // Get the elements array from the receiver and check that it
   // is not a dictionary.
   __ ldr(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset));