OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 14 matching lines...) |
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | 25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
27 | 27 |
28 #include "v8.h" | 28 #include "v8.h" |
29 | 29 |
30 #if V8_TARGET_ARCH_IA32 | 30 #if V8_TARGET_ARCH_IA32 |
31 | 31 |
32 #include "lithium-allocator-inl.h" | 32 #include "lithium-allocator-inl.h" |
33 #include "ia32/lithium-ia32.h" | 33 #include "ia32/lithium-ia32.h" |
34 #include "ia32/lithium-codegen-ia32.h" | 34 #include "ia32/lithium-codegen-ia32.h" |
| 35 #include "hydrogen-osr.h" |
35 | 36 |
36 namespace v8 { | 37 namespace v8 { |
37 namespace internal { | 38 namespace internal { |
38 | 39 |
39 #define DEFINE_COMPILE(type) \ | 40 #define DEFINE_COMPILE(type) \ |
40 void L##type::CompileToNative(LCodeGen* generator) { \ | 41 void L##type::CompileToNative(LCodeGen* generator) { \ |
41 generator->Do##type(this); \ | 42 generator->Do##type(this); \ |
42 } | 43 } |
43 LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE) | 44 LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE) |
44 #undef DEFINE_COMPILE | 45 #undef DEFINE_COMPILE |
(...skipping 423 matching lines...) |
468 LPhase phase("L_Building chunk", chunk_); | 469 LPhase phase("L_Building chunk", chunk_); |
469 status_ = BUILDING; | 470 status_ = BUILDING; |
470 | 471 |
471 // Reserve the first spill slot for the state of dynamic alignment. | 472 // Reserve the first spill slot for the state of dynamic alignment. |
472 if (info()->IsOptimizing()) { | 473 if (info()->IsOptimizing()) { |
473 int alignment_state_index = chunk_->GetNextSpillIndex(false); | 474 int alignment_state_index = chunk_->GetNextSpillIndex(false); |
474 ASSERT_EQ(alignment_state_index, 0); | 475 ASSERT_EQ(alignment_state_index, 0); |
475 USE(alignment_state_index); | 476 USE(alignment_state_index); |
476 } | 477 } |
477 | 478 |
| 479 // If compiling for OSR, reserve space for the unoptimized frame, |
| 480 // which will be subsumed into this frame. |
| 481 if (graph()->has_osr()) { |
| 482 for (int i = graph()->osr()->UnoptimizedFrameSlots(); i > 0; i--) { |
| 483 chunk_->GetNextSpillIndex(false); |
| 484 } |
| 485 } |
| 486 |
478 const ZoneList<HBasicBlock*>* blocks = graph()->blocks(); | 487 const ZoneList<HBasicBlock*>* blocks = graph()->blocks(); |
479 for (int i = 0; i < blocks->length(); i++) { | 488 for (int i = 0; i < blocks->length(); i++) { |
480 HBasicBlock* next = NULL; | 489 HBasicBlock* next = NULL; |
481 if (i < blocks->length() - 1) next = blocks->at(i + 1); | 490 if (i < blocks->length() - 1) next = blocks->at(i + 1); |
482 DoBasicBlock(blocks->at(i), next); | 491 DoBasicBlock(blocks->at(i), next); |
483 if (is_aborted()) return NULL; | 492 if (is_aborted()) return NULL; |
484 } | 493 } |
485 status_ = DONE; | 494 status_ = DONE; |
486 return chunk_; | 495 return chunk_; |
487 } | 496 } |
(...skipping 2076 matching lines...) |
2564 CodeStubInterfaceDescriptor* descriptor = | 2573 CodeStubInterfaceDescriptor* descriptor = |
2565 info()->code_stub()->GetInterfaceDescriptor(info()->isolate()); | 2574 info()->code_stub()->GetInterfaceDescriptor(info()->isolate()); |
2566 int index = static_cast<int>(instr->index()); | 2575 int index = static_cast<int>(instr->index()); |
2567 Register reg = DESCRIPTOR_GET_PARAMETER_REGISTER(descriptor, index); | 2576 Register reg = DESCRIPTOR_GET_PARAMETER_REGISTER(descriptor, index); |
2568 return DefineFixed(result, reg); | 2577 return DefineFixed(result, reg); |
2569 } | 2578 } |
2570 } | 2579 } |
2571 | 2580 |
2572 | 2581 |
2573 LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) { | 2582 LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) { |
2574 int spill_index = chunk()->GetNextSpillIndex(false); // Not double-width. | 2583 // Use an index that corresponds to the location in the unoptimized frame, |
2575 if (spill_index > LUnallocated::kMaxFixedSlotIndex) { | 2584 // which the optimized frame will subsume. |
2576 Abort("Too many spill slots needed for OSR"); | 2585 int env_index = instr->index(); |
2577 spill_index = 0; | 2586 int spill_index = 0; |
| 2587 if (instr->environment()->is_parameter_index(env_index)) { |
| 2588 spill_index = chunk()->GetParameterStackSlot(env_index); |
| 2589 } else { |
| 2590 spill_index = env_index - instr->environment()->first_local_index(); |
| 2591 if (spill_index > LUnallocated::kMaxFixedSlotIndex) { |
| 2592 Abort("Too many spill slots needed for OSR"); |
| 2593 spill_index = 0; |
| 2594 } |
| 2595 if (spill_index == 0) { |
| 2596 // The dynamic frame alignment state overwrites the first local. |
| 2597 // The first local is saved at the end of the unoptimized frame. |
| 2598 spill_index = graph()->osr()->UnoptimizedFrameSlots(); |
| 2599 } |
2578 } | 2600 } |
2579 return DefineAsSpilled(new(zone()) LUnknownOSRValue, spill_index); | 2601 return DefineAsSpilled(new(zone()) LUnknownOSRValue, spill_index); |
2580 } | 2602 } |
2581 | 2603 |
2582 | 2604 |
2583 LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) { | 2605 LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) { |
2584 LOperand* context = UseFixed(instr->context(), esi); | 2606 LOperand* context = UseFixed(instr->context(), esi); |
2585 argument_count_ -= instr->argument_count(); | 2607 argument_count_ -= instr->argument_count(); |
2586 LCallStub* result = new(zone()) LCallStub(context); | 2608 LCallStub* result = new(zone()) LCallStub(context); |
2587 return MarkAsCall(DefineFixed(result, eax), instr); | 2609 return MarkAsCall(DefineFixed(result, eax), instr); |
(...skipping 163 matching lines...) |
2751 LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) { | 2773 LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) { |
2752 LOperand* object = UseRegister(instr->object()); | 2774 LOperand* object = UseRegister(instr->object()); |
2753 LOperand* index = UseTempRegister(instr->index()); | 2775 LOperand* index = UseTempRegister(instr->index()); |
2754 return DefineSameAsFirst(new(zone()) LLoadFieldByIndex(object, index)); | 2776 return DefineSameAsFirst(new(zone()) LLoadFieldByIndex(object, index)); |
2755 } | 2777 } |
2756 | 2778 |
2757 | 2779 |
2758 } } // namespace v8::internal | 2780 } } // namespace v8::internal |
2759 | 2781 |
2760 #endif // V8_TARGET_ARCH_IA32 | 2782 #endif // V8_TARGET_ARCH_IA32 |
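
A minimal standalone sketch (not part of this patch, and not V8 source) of the spill-index mapping the new DoUnknownOSRValue body performs: parameters keep their incoming stack slots, locals are numbered from the environment's first local index, and index 0 is redirected to the end of the reserved unoptimized-frame area because the dynamic frame alignment state occupies the first local's slot. kFirstLocalIndex, kUnoptimizedFrameSlots, kMaxFixedSlotIndex, and ParameterStackSlot below are hypothetical stand-ins for the environment's first_local_index(), graph()->osr()->UnoptimizedFrameSlots(), LUnallocated::kMaxFixedSlotIndex, and chunk()->GetParameterStackSlot().

// Illustrative sketch only; constants are assumed example values,
// not values taken from the patch.
#include <cstdio>

namespace {

const int kFirstLocalIndex = 2;        // assumed: env indices 0..1 are parameters
const int kUnoptimizedFrameSlots = 6;  // stands in for graph()->osr()->UnoptimizedFrameSlots()
const int kMaxFixedSlotIndex = 128;    // stands in for LUnallocated::kMaxFixedSlotIndex

// Hypothetical stand-in for chunk()->GetParameterStackSlot(env_index):
// parameters keep their incoming (negative) stack slots.
int ParameterStackSlot(int env_index) {
  return -(env_index + 1);
}

// Mirrors the control flow of the new DoUnknownOSRValue body.
int SpillIndexForOSRValue(int env_index) {
  if (env_index < kFirstLocalIndex) {
    return ParameterStackSlot(env_index);
  }
  int spill_index = env_index - kFirstLocalIndex;
  if (spill_index > kMaxFixedSlotIndex) {
    // The real code calls Abort("Too many spill slots needed for OSR").
    std::fprintf(stderr, "Too many spill slots needed for OSR\n");
    spill_index = 0;
  }
  if (spill_index == 0) {
    // The dynamic frame alignment state overwrites the first local, so the
    // first local is kept at the end of the reserved unoptimized frame.
    spill_index = kUnoptimizedFrameSlots;
  }
  return spill_index;
}

}  // namespace

int main() {
  for (int i = 0; i < 5; ++i) {
    std::printf("env index %d -> spill index %d\n", i, SpillIndexForOSRValue(i));
  }
  return 0;
}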