OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 14 matching lines...) |
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | 25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
27 | 27 |
28 #include "v8.h" | 28 #include "v8.h" |
29 | 29 |
30 #if V8_TARGET_ARCH_IA32 | 30 #if V8_TARGET_ARCH_IA32 |
31 | 31 |
32 #include "lithium-allocator-inl.h" | 32 #include "lithium-allocator-inl.h" |
33 #include "ia32/lithium-ia32.h" | 33 #include "ia32/lithium-ia32.h" |
34 #include "ia32/lithium-codegen-ia32.h" | 34 #include "ia32/lithium-codegen-ia32.h" |
| 35 #include "hydrogen-osr.h" |
35 | 36 |
36 namespace v8 { | 37 namespace v8 { |
37 namespace internal { | 38 namespace internal { |
38 | 39 |
39 #define DEFINE_COMPILE(type) \ | 40 #define DEFINE_COMPILE(type) \ |
40 void L##type::CompileToNative(LCodeGen* generator) { \ | 41 void L##type::CompileToNative(LCodeGen* generator) { \ |
41 generator->Do##type(this); \ | 42 generator->Do##type(this); \ |
42 } | 43 } |
43 LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE) | 44 LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE) |
44 #undef DEFINE_COMPILE | 45 #undef DEFINE_COMPILE |
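For readers unfamiliar with this pattern: DEFINE_COMPILE stamps out one CompileToNative override per concrete Lithium instruction named in LITHIUM_CONCRETE_INSTRUCTION_LIST, each of which double-dispatches to the matching LCodeGen visitor. Expanded for an illustrative instruction type Foo (not necessarily a real entry in the list), the macro produces:

    void LFoo::CompileToNative(LCodeGen* generator) {
      generator->DoFoo(this);
    }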
(...skipping 441 matching lines...) |
486 LPhase phase("L_Building chunk", chunk_); | 487 LPhase phase("L_Building chunk", chunk_); |
487 status_ = BUILDING; | 488 status_ = BUILDING; |
488 | 489 |
489 // Reserve the first spill slot for the state of dynamic alignment. | 490 // Reserve the first spill slot for the state of dynamic alignment. |
490 if (info()->IsOptimizing()) { | 491 if (info()->IsOptimizing()) { |
491 int alignment_state_index = chunk_->GetNextSpillIndex(false); | 492 int alignment_state_index = chunk_->GetNextSpillIndex(false); |
492 ASSERT_EQ(alignment_state_index, 0); | 493 ASSERT_EQ(alignment_state_index, 0); |
493 USE(alignment_state_index); | 494 USE(alignment_state_index); |
494 } | 495 } |
495 | 496 |
| 497 // If compiling for OSR, reserve space for the unoptimized frame, |
| 498 // which will be subsumed into this frame. |
| 499 if (graph()->has_osr()) { |
| 500 for (int i = graph()->osr()->UnoptimizedFrameSlots(); i > 0; i--) { |
| 501 chunk_->GetNextSpillIndex(false); |
| 502 } |
| 503 } |
| 504 |
496 const ZoneList<HBasicBlock*>* blocks = graph()->blocks(); | 505 const ZoneList<HBasicBlock*>* blocks = graph()->blocks(); |
497 for (int i = 0; i < blocks->length(); i++) { | 506 for (int i = 0; i < blocks->length(); i++) { |
498 HBasicBlock* next = NULL; | 507 HBasicBlock* next = NULL; |
499 if (i < blocks->length() - 1) next = blocks->at(i + 1); | 508 if (i < blocks->length() - 1) next = blocks->at(i + 1); |
500 DoBasicBlock(blocks->at(i), next); | 509 DoBasicBlock(blocks->at(i), next); |
501 if (is_aborted()) return NULL; | 510 if (is_aborted()) return NULL; |
502 } | 511 } |
503 status_ = DONE; | 512 status_ = DONE; |
504 return chunk_; | 513 return chunk_; |
505 } | 514 } |
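The reservation loop added above changes the spill-slot layout that Build() produces for OSR compilations. Here is a minimal sketch of the resulting layout, assuming GetNextSpillIndex(false) hands out consecutive indices starting at 0 and using an assumed slot count in place of graph()->osr()->UnoptimizedFrameSlots():

    #include <cstdio>

    int main() {
      int next_spill_index = 0;
      // Slot 0 holds the dynamic frame alignment state (see the ASSERT_EQ above).
      int alignment_state_index = next_spill_index++;
      // For OSR, the next UnoptimizedFrameSlots() slots shadow the unoptimized
      // frame that the optimized frame subsumes (4 is an assumed example value).
      const int kUnoptimizedFrameSlots = 4;
      int osr_begin = next_spill_index;
      next_spill_index += kUnoptimizedFrameSlots;
      std::printf("alignment state: slot %d\n", alignment_state_index);
      std::printf("OSR shadow:      slots %d..%d\n", osr_begin, next_spill_index - 1);
      std::printf("regular spills:  slot %d and up\n", next_spill_index);
      return 0;
    }

This is why DoUnknownOSRValue (below) can compute fixed spill indices relative to the unoptimized frame: those slots are guaranteed to precede every spill slot the register allocator hands out later.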
(...skipping 2084 matching lines...) |
2590 CodeStubInterfaceDescriptor* descriptor = | 2599 CodeStubInterfaceDescriptor* descriptor = |
2591 info()->code_stub()->GetInterfaceDescriptor(info()->isolate()); | 2600 info()->code_stub()->GetInterfaceDescriptor(info()->isolate()); |
2592 int index = static_cast<int>(instr->index()); | 2601 int index = static_cast<int>(instr->index()); |
2593 Register reg = DESCRIPTOR_GET_PARAMETER_REGISTER(descriptor, index); | 2602 Register reg = DESCRIPTOR_GET_PARAMETER_REGISTER(descriptor, index); |
2594 return DefineFixed(result, reg); | 2603 return DefineFixed(result, reg); |
2595 } | 2604 } |
2596 } | 2605 } |
2597 | 2606 |
2598 | 2607 |
2599 LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) { | 2608 LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) { |
2600 int spill_index = chunk()->GetNextSpillIndex(false); // Not double-width. | 2609 // Use an index that corresponds to the location in the unoptimized frame, |
2601 if (spill_index > LUnallocated::kMaxFixedSlotIndex) { | 2610 // which the optimized frame will subsume. |
2602 Abort("Too many spill slots needed for OSR"); | 2611 int env_index = instr->index(); |
2603 spill_index = 0; | 2612 int spill_index = 0; |
| 2613 if (instr->environment()->is_parameter_index(env_index)) { |
| 2614 spill_index = chunk()->GetParameterStackSlot(env_index); |
| 2615 } else { |
| 2616 spill_index = env_index - instr->environment()->first_local_index(); |
| 2617 if (spill_index > LUnallocated::kMaxFixedSlotIndex) { |
| 2618 Abort("Too many spill slots needed for OSR"); |
| 2619 spill_index = 0; |
| 2620 } |
| 2621 if (spill_index == 0) { |
| 2622 // The dynamic frame alignment state overwrites the first local. |
| 2623 // The first local is saved at the end of the unoptimized frame. |
| 2624 spill_index = graph()->osr()->UnoptimizedFrameSlots(); |
| 2625 } |
2604 } | 2626 } |
2605 return DefineAsSpilled(new(zone()) LUnknownOSRValue, spill_index); | 2627 return DefineAsSpilled(new(zone()) LUnknownOSRValue, spill_index); |
2606 } | 2628 } |
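The rewritten DoUnknownOSRValue maps each OSR value from its environment index to a fixed spill slot inside the area reserved during Build(). A minimal sketch of that mapping, with the environment and OSR queries replaced by plain parameters (all names here are illustrative, not V8 API):

    // Returns the fixed spill index for a non-parameter OSR value.
    // env_index, first_local_index and unoptimized_frame_slots stand in for
    // instr->index(), environment()->first_local_index() and
    // graph()->osr()->UnoptimizedFrameSlots() respectively.
    int MapOsrLocalToSpillIndex(int env_index,
                                int first_local_index,
                                int unoptimized_frame_slots) {
      int spill_index = env_index - first_local_index;
      if (spill_index == 0) {
        // The dynamic frame alignment state overwrites the first local, so the
        // first local is saved at the end of the unoptimized frame instead.
        spill_index = unoptimized_frame_slots;
      }
      return spill_index;
    }

Parameters take the other branch and resolve through GetParameterStackSlot(); the kMaxFixedSlotIndex guard, carried over from the old code, still aborts compilation when the unoptimized frame needs more fixed slots than can be addressed.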
2607 | 2629 |
2608 | 2630 |
2609 LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) { | 2631 LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) { |
2610 LOperand* context = UseFixed(instr->context(), esi); | 2632 LOperand* context = UseFixed(instr->context(), esi); |
2611 argument_count_ -= instr->argument_count(); | 2633 argument_count_ -= instr->argument_count(); |
2612 LCallStub* result = new(zone()) LCallStub(context); | 2634 LCallStub* result = new(zone()) LCallStub(context); |
2613 return MarkAsCall(DefineFixed(result, eax), instr); | 2635 return MarkAsCall(DefineFixed(result, eax), instr); |
(...skipping 163 matching lines...) |
2777 LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) { | 2799 LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) { |
2778 LOperand* object = UseRegister(instr->object()); | 2800 LOperand* object = UseRegister(instr->object()); |
2779 LOperand* index = UseTempRegister(instr->index()); | 2801 LOperand* index = UseTempRegister(instr->index()); |
2780 return DefineSameAsFirst(new(zone()) LLoadFieldByIndex(object, index)); | 2802 return DefineSameAsFirst(new(zone()) LLoadFieldByIndex(object, index)); |
2781 } | 2803 } |
2782 | 2804 |
2783 | 2805 |
2784 } } // namespace v8::internal | 2806 } } // namespace v8::internal |
2785 | 2807 |
2786 #endif // V8_TARGET_ARCH_IA32 | 2808 #endif // V8_TARGET_ARCH_IA32 |