Chromium Code Reviews — Index: src/ia32/lithium-ia32.cc
| diff --git a/src/ia32/lithium-ia32.cc b/src/ia32/lithium-ia32.cc |
| index 65a300edcd2fea15d20b081f81ec6d146a3e97ef..698c1db60e77cf97aeabaf50b67063a547242c3f 100644 |
| --- a/src/ia32/lithium-ia32.cc |
| +++ b/src/ia32/lithium-ia32.cc |
| @@ -44,10 +44,10 @@ LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE) |
| #undef DEFINE_COMPILE |
| LOsrEntry::LOsrEntry() { |
| - for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) { |
| + for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) { |
| register_spills_[i] = NULL; |
| } |
| - for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; ++i) { |
| + for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); ++i) { |
| double_register_spills_[i] = NULL; |
| } |
| } |
| @@ -460,9 +460,11 @@ LPlatformChunk* LChunkBuilder::Build() { |
| status_ = BUILDING; |
| // Reserve the first spill slot for the state of dynamic alignment. |
| - int alignment_state_index = chunk_->GetNextSpillIndex(false); |
| - ASSERT_EQ(alignment_state_index, 0); |
| - USE(alignment_state_index); |
| + if (info()->IsOptimizing()) { |
| + int alignment_state_index = chunk_->GetNextSpillIndex(false); |
| + ASSERT_EQ(alignment_state_index, 0); |
| + USE(alignment_state_index); |
| + } |
| const ZoneList<HBasicBlock*>* blocks = graph()->blocks(); |
| for (int i = 0; i < blocks->length(); i++) { |
| @@ -494,6 +496,12 @@ LUnallocated* LChunkBuilder::ToUnallocated(XMMRegister reg) { |
| } |
| +LUnallocated* LChunkBuilder::ToUnallocated(X87TopOfStackRegister reg) { |
| + return new(zone()) LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER, |
| + X87TopOfStackRegister::ToAllocationIndex(reg)); |
| +} |
| + |
| + |
| LOperand* LChunkBuilder::UseFixed(HValue* value, Register fixed_register) { |
| return Use(value, ToUnallocated(fixed_register)); |
| } |
| @@ -626,6 +634,13 @@ LInstruction* LChunkBuilder::DefineFixedDouble( |
| } |
| +template<int I, int T> |
| +LInstruction* LChunkBuilder::DefineX87TOS( |
| + LTemplateInstruction<1, I, T>* instr) { |
| + return Define(instr, ToUnallocated(x87tos)); |
| +} |
| + |
| + |
| LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) { |
| HEnvironment* hydrogen_env = current_block_->last_environment(); |
| int argument_index_accumulator = 0; |
| @@ -638,6 +653,8 @@ LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) { |
| LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr, |
| HInstruction* hinstr, |
| CanDeoptimize can_deoptimize) { |
| + info()->MarkAsNonDeferredCalling(); |
| + |
| #ifdef DEBUG |
| instr->VerifyCall(); |
| #endif |
| @@ -803,44 +820,46 @@ void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) { |
| ASSERT(is_building()); |
| current_block_ = block; |
| next_block_ = next_block; |
| - if (block->IsStartBlock()) { |
| - block->UpdateEnvironment(graph_->start_environment()); |
| - argument_count_ = 0; |
| - } else if (block->predecessors()->length() == 1) { |
| - // We have a single predecessor => copy environment and outgoing |
| - // argument count from the predecessor. |
| - ASSERT(block->phis()->length() == 0); |
| - HBasicBlock* pred = block->predecessors()->at(0); |
| - HEnvironment* last_environment = pred->last_environment(); |
| - ASSERT(last_environment != NULL); |
| - // Only copy the environment, if it is later used again. |
| - if (pred->end()->SecondSuccessor() == NULL) { |
| - ASSERT(pred->end()->FirstSuccessor() == block); |
| + if (graph()->info()->IsOptimizing()) { |

[Review comment thread attached at this line of the diff:]

Jakob Kummerow (2012/11/28 16:28:22):
    I guess the changes to this method need porting (w… [comment truncated in extraction]

danno (2012/11/30 16:23:24):
    It looks like subsequent changes obviated the need… [comment truncated in extraction]

| + if (block->IsStartBlock()) { |
| + block->UpdateEnvironment(graph_->start_environment()); |
| + argument_count_ = 0; |
| + } else if (block->predecessors()->length() == 1) { |
| + // We have a single predecessor => copy environment and outgoing |
| + // argument count from the predecessor. |
| + ASSERT(block->phis()->length() == 0); |
| + HBasicBlock* pred = block->predecessors()->at(0); |
| + HEnvironment* last_environment = pred->last_environment(); |
| + ASSERT(last_environment != NULL); |
| + // Only copy the environment, if it is later used again. |
| + if (pred->end()->SecondSuccessor() == NULL) { |
| + ASSERT(pred->end()->FirstSuccessor() == block); |
| + } else { |
| + if (pred->end()->FirstSuccessor()->block_id() > block->block_id() || |
| + pred->end()->SecondSuccessor()->block_id() > block->block_id()) { |
| + last_environment = last_environment->Copy(); |
| + } |
| + } |
| + block->UpdateEnvironment(last_environment); |
| + ASSERT(pred->argument_count() >= 0); |
| + argument_count_ = pred->argument_count(); |
| } else { |
| - if (pred->end()->FirstSuccessor()->block_id() > block->block_id() || |
| - pred->end()->SecondSuccessor()->block_id() > block->block_id()) { |
| - last_environment = last_environment->Copy(); |
| + // We are at a state join => process phis. |
| + HBasicBlock* pred = block->predecessors()->at(0); |
| + // No need to copy the environment, it cannot be used later. |
| + HEnvironment* last_environment = pred->last_environment(); |
| + for (int i = 0; i < block->phis()->length(); ++i) { |
| + HPhi* phi = block->phis()->at(i); |
| + last_environment->SetValueAt(phi->merged_index(), phi); |
| } |
| + for (int i = 0; i < block->deleted_phis()->length(); ++i) { |
| + last_environment->SetValueAt(block->deleted_phis()->at(i), |
| + graph_->GetConstantUndefined()); |
| + } |
| + block->UpdateEnvironment(last_environment); |
| + // Pick up the outgoing argument count of one of the predecessors. |
| + argument_count_ = pred->argument_count(); |
| } |
| - block->UpdateEnvironment(last_environment); |
| - ASSERT(pred->argument_count() >= 0); |
| - argument_count_ = pred->argument_count(); |
| - } else { |
| - // We are at a state join => process phis. |
| - HBasicBlock* pred = block->predecessors()->at(0); |
| - // No need to copy the environment, it cannot be used later. |
| - HEnvironment* last_environment = pred->last_environment(); |
| - for (int i = 0; i < block->phis()->length(); ++i) { |
| - HPhi* phi = block->phis()->at(i); |
| - last_environment->SetValueAt(phi->merged_index(), phi); |
| - } |
| - for (int i = 0; i < block->deleted_phis()->length(); ++i) { |
| - last_environment->SetValueAt(block->deleted_phis()->at(i), |
| - graph_->GetConstantUndefined()); |
| - } |
| - block->UpdateEnvironment(last_environment); |
| - // Pick up the outgoing argument count of one of the predecessors. |
| - argument_count_ = pred->argument_count(); |
| } |
| HInstruction* current = block->first(); |
| int start = chunk_->instructions()->length(); |
| @@ -1680,8 +1699,12 @@ LInstruction* LChunkBuilder::DoForceRepresentation(HForceRepresentation* bad) { |
| LInstruction* LChunkBuilder::DoChange(HChange* instr) { |
| Representation from = instr->from(); |
| Representation to = instr->to(); |
| + // Only mark conversions that might need to allocate as calling rather than |
| + // all changes. This makes simple, non-allocating conversion not have to force |
| + // building a stack frame. |
| if (from.IsTagged()) { |
| if (to.IsDouble()) { |
| + info()->MarkAsDeferredCalling(); |
| LOperand* value = UseRegister(instr->value()); |
| // Temp register only necessary for minus zero check. |
| LOperand* temp = instr->deoptimize_on_minus_zero() |
| @@ -1706,7 +1729,10 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) { |
| } |
| } else if (from.IsDouble()) { |
| if (to.IsTagged()) { |
| - LOperand* value = UseRegister(instr->value()); |
| + info()->MarkAsDeferredCalling(); |
| + LOperand* value = CpuFeatures::IsSupported(SSE2) |
| + ? UseRegisterAtStart(instr->value()) |
| + : UseAtStart(instr->value()); |
| LOperand* temp = TempRegister(); |
| // Make sure that temp and result_temp are different registers. |
| @@ -1724,6 +1750,7 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) { |
| DefineAsRegister(new(zone()) LDoubleToI(value, temp))); |
| } |
| } else if (from.IsInteger32()) { |
| + info()->MarkAsDeferredCalling(); |
| if (to.IsTagged()) { |
| HValue* val = instr->value(); |
| LOperand* value = UseRegister(val); |
| @@ -2240,8 +2267,17 @@ LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) { |
| LInstruction* LChunkBuilder::DoParameter(HParameter* instr) { |
| - int spill_index = chunk()->GetParameterStackSlot(instr->index()); |
| - return DefineAsSpilled(new(zone()) LParameter, spill_index); |
| + LParameter* result = new(zone()) LParameter; |
| + if (info()->IsOptimizing()) { |
| + int spill_index = chunk()->GetParameterStackSlot(instr->index()); |
| + return DefineAsSpilled(result, spill_index); |
| + } else { |
| + ASSERT(info()->IsStub()); |
| + CodeStubInterfaceDescriptor* descriptor = |
| + info()->code_stub()->GetInterfaceDescriptor(info()->isolate()); |
| + Register reg = descriptor->register_params[instr->index()]; |
| + return DefineFixed(result, reg); |
| + } |
| } |
| @@ -2342,6 +2378,7 @@ LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) { |
| LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) { |
| + info()->MarkAsDeferredCalling(); |
| if (instr->is_function_entry()) { |
| LOperand* context = UseFixed(instr->context(), esi); |
| return MarkAsCall(new(zone()) LStackCheck(context), instr); |