Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(411)

Side by Side Diff: src/ia32/deoptimizer-ia32.cc

Issue 6529032: Merge 6168:6800 from bleeding_edge to experimental/gc branch. (Closed) Base URL: http://v8.googlecode.com/svn/branches/experimental/gc/
Patch Set: Created 9 years, 10 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « src/ia32/codegen-ia32.cc ('k') | src/ia32/disasm-ia32.cc » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright 2010 the V8 project authors. All rights reserved. 1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution. 11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its 12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived 13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission. 14 // from this software without specific prior written permission.
15 // 15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 27
28 #include "v8.h" 28 #include "v8.h"
29 29
30 #if defined(V8_TARGET_ARCH_IA32)
31
30 #include "codegen.h" 32 #include "codegen.h"
31 #include "deoptimizer.h" 33 #include "deoptimizer.h"
32 #include "full-codegen.h" 34 #include "full-codegen.h"
33 #include "safepoint-table.h" 35 #include "safepoint-table.h"
34 36
35 namespace v8 { 37 namespace v8 {
36 namespace internal { 38 namespace internal {
37 39
40 int Deoptimizer::table_entry_size_ = 10;
38 41
39 int Deoptimizer::table_entry_size_ = 10; 42
43 int Deoptimizer::patch_size() {
44 return Assembler::kCallInstructionLength;
45 }
46
47
48 static void ZapCodeRange(Address start, Address end) {
49 #ifdef DEBUG
50 ASSERT(start <= end);
51 int size = end - start;
52 CodePatcher destroyer(start, size);
53 while (size-- > 0) destroyer.masm()->int3();
54 #endif
55 }
56
40 57
41 void Deoptimizer::DeoptimizeFunction(JSFunction* function) { 58 void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
42 AssertNoAllocation no_allocation; 59 AssertNoAllocation no_allocation;
43 60
44 if (!function->IsOptimized()) return; 61 if (!function->IsOptimized()) return;
45 62
46 // Get the optimized code. 63 // Get the optimized code.
47 Code* code = function->code(); 64 Code* code = function->code();
65 Address code_start_address = code->instruction_start();
48 66
49 // Invalidate the relocation information, as it will become invalid by the 67 // We will overwrite the code's relocation info in-place. Relocation info
50 // code patching below, and is not needed any more. 68 // is written backward. The relocation info is the payload of a byte
51 code->InvalidateRelocation(); 69 // array. Later on we will slide this to the start of the byte array and
70 // create a filler object in the remaining space.
71 ByteArray* reloc_info = code->relocation_info();
72 Address reloc_end_address = reloc_info->address() + reloc_info->Size();
73 RelocInfoWriter reloc_info_writer(reloc_end_address, code_start_address);
52 74
53 // For each return after a safepoint insert an absolute call to the 75 // For each return after a safepoint insert a call to the corresponding
54 // corresponding deoptimization entry. 76 // deoptimization entry. Since the call is a relative encoding, write new
55 unsigned last_pc_offset = 0; 77 // reloc info. We do not need any of the existing reloc info because the
56 SafepointTable table(function->code()); 78 // existing code will not be used again (we zap it in debug builds).
57 for (unsigned i = 0; i < table.length(); i++) { 79 SafepointTable table(code);
58 unsigned pc_offset = table.GetPcOffset(i); 80 Address prev_address = code_start_address;
59 int deoptimization_index = table.GetDeoptimizationIndex(i); 81 for (unsigned i = 0; i < table.length(); ++i) {
60 int gap_code_size = table.GetGapCodeSize(i); 82 Address curr_address = code_start_address + table.GetPcOffset(i);
61 #ifdef DEBUG 83 ASSERT_GE(curr_address, prev_address);
62 // Destroy the code which is not supposed to run again. 84 ZapCodeRange(prev_address, curr_address);
63 unsigned instructions = pc_offset - last_pc_offset; 85
64 CodePatcher destroyer(code->instruction_start() + last_pc_offset, 86 SafepointEntry safepoint_entry = table.GetEntry(i);
65 instructions); 87 int deoptimization_index = safepoint_entry.deoptimization_index();
66 for (unsigned i = 0; i < instructions; i++) { 88 if (deoptimization_index != Safepoint::kNoDeoptimizationIndex) {
67 destroyer.masm()->int3(); 89 // The gap code is needed to get to the state expected at the bailout.
90 curr_address += safepoint_entry.gap_code_size();
91
92 CodePatcher patcher(curr_address, patch_size());
93 Address deopt_entry = GetDeoptimizationEntry(deoptimization_index, LAZY);
94 patcher.masm()->call(deopt_entry, RelocInfo::NONE);
95
96 // We use RUNTIME_ENTRY for deoptimization bailouts.
97 RelocInfo rinfo(curr_address + 1, // 1 after the call opcode.
98 RelocInfo::RUNTIME_ENTRY,
99 reinterpret_cast<intptr_t>(deopt_entry));
100 reloc_info_writer.Write(&rinfo);
101 ASSERT_GE(reloc_info_writer.pos(),
102 reloc_info->address() + ByteArray::kHeaderSize);
103 curr_address += patch_size();
68 } 104 }
69 #endif 105 prev_address = curr_address;
70 last_pc_offset = pc_offset;
71 if (deoptimization_index != Safepoint::kNoDeoptimizationIndex) {
72 CodePatcher patcher(
73 code->instruction_start() + pc_offset + gap_code_size,
74 Assembler::kCallInstructionLength);
75 patcher.masm()->call(GetDeoptimizationEntry(deoptimization_index, LAZY),
76 RelocInfo::NONE);
77 last_pc_offset += gap_code_size + Assembler::kCallInstructionLength;
78 }
79 } 106 }
80 #ifdef DEBUG 107 ZapCodeRange(prev_address,
81 // Destroy the code which is not supposed to run again. 108 code_start_address + code->safepoint_table_offset());
82 unsigned instructions = code->safepoint_table_start() - last_pc_offset; 109
83 CodePatcher destroyer(code->instruction_start() + last_pc_offset, 110 // Move the relocation info to the beginning of the byte array.
84 instructions); 111 int new_reloc_size = reloc_end_address - reloc_info_writer.pos();
85 for (unsigned i = 0; i < instructions; i++) { 112 memmove(code->relocation_start(), reloc_info_writer.pos(), new_reloc_size);
86 destroyer.masm()->int3(); 113
87 } 114 // The relocation info is in place, update the size.
88 #endif 115 reloc_info->set_length(new_reloc_size);
116
117 // Handle the junk part after the new relocation info. We will create
118 // a non-live object in the extra space at the end of the former reloc info.
119 Address junk_address = reloc_info->address() + reloc_info->Size();
120 ASSERT(junk_address <= reloc_end_address);
121 Heap::CreateFillerObjectAt(junk_address, reloc_end_address - junk_address);
89 122
90 // Add the deoptimizing code to the list. 123 // Add the deoptimizing code to the list.
91 DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code); 124 DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code);
92 node->set_next(deoptimizing_code_list_); 125 node->set_next(deoptimizing_code_list_);
93 deoptimizing_code_list_ = node; 126 deoptimizing_code_list_ = node;
94 127
95 // Set the code for the function to non-optimized version. 128 // Set the code for the function to non-optimized version.
96 function->ReplaceCode(function->shared()->code()); 129 function->ReplaceCode(function->shared()->code());
97 130
98 if (FLAG_trace_deopt) { 131 if (FLAG_trace_deopt) {
99 PrintF("[forced deoptimization: "); 132 PrintF("[forced deoptimization: ");
100 function->PrintName(); 133 function->PrintName();
101 PrintF(" / %x]\n", reinterpret_cast<uint32_t>(function)); 134 PrintF(" / %x]\n", reinterpret_cast<uint32_t>(function));
102 } 135 }
103 } 136 }
104 137
105 138
106 void Deoptimizer::PatchStackCheckCode(RelocInfo* rinfo, 139 void Deoptimizer::PatchStackCheckCodeAt(Address pc_after,
107 Code* replacement_code) { 140 Code* check_code,
108 // The stack check code matches the pattern (on ia32, for example): 141 Code* replacement_code) {
142 Address call_target_address = pc_after - kIntSize;
143 ASSERT(check_code->entry() ==
144 Assembler::target_address_at(call_target_address));
145 // The stack check code matches the pattern:
109 // 146 //
110 // cmp esp, <limit> 147 // cmp esp, <limit>
111 // jae ok 148 // jae ok
112 // call <stack guard> 149 // call <stack guard>
150 // test eax, <loop nesting depth>
113 // ok: ... 151 // ok: ...
114 // 152 //
115 // We will patch the code to: 153 // We will patch away the branch so the code is:
116 // 154 //
117 // cmp esp, <limit> ;; Not changed 155 // cmp esp, <limit> ;; Not changed
118 // nop 156 // nop
119 // nop 157 // nop
120 // call <on-stack replacement> 158 // call <on-stack replacement>
159 // test eax, <loop nesting depth>
121 // ok: 160 // ok:
122 Address call_target_address = rinfo->pc();
123 ASSERT(*(call_target_address - 3) == 0x73 && // jae 161 ASSERT(*(call_target_address - 3) == 0x73 && // jae
124 *(call_target_address - 2) == 0x05 && // offset 162 *(call_target_address - 2) == 0x07 && // offset
125 *(call_target_address - 1) == 0xe8); // call 163 *(call_target_address - 1) == 0xe8); // call
126 *(call_target_address - 3) = 0x90; // nop 164 *(call_target_address - 3) = 0x90; // nop
127 *(call_target_address - 2) = 0x90; // nop 165 *(call_target_address - 2) = 0x90; // nop
128 rinfo->set_target_address(replacement_code->entry()); 166 Assembler::set_target_address_at(call_target_address,
167 replacement_code->entry());
129 } 168 }
130 169
131 170
132 void Deoptimizer::RevertStackCheckCode(RelocInfo* rinfo, Code* check_code) { 171 void Deoptimizer::RevertStackCheckCodeAt(Address pc_after,
133 Address call_target_address = rinfo->pc(); 172 Code* check_code,
173 Code* replacement_code) {
174 Address call_target_address = pc_after - kIntSize;
175 ASSERT(replacement_code->entry() ==
176 Assembler::target_address_at(call_target_address));
177 // Replace the nops from patching (Deoptimizer::PatchStackCheckCode) to
178 // restore the conditional branch.
134 ASSERT(*(call_target_address - 3) == 0x90 && // nop 179 ASSERT(*(call_target_address - 3) == 0x90 && // nop
135 *(call_target_address - 2) == 0x90 && // nop 180 *(call_target_address - 2) == 0x90 && // nop
136 *(call_target_address - 1) == 0xe8); // call 181 *(call_target_address - 1) == 0xe8); // call
137 *(call_target_address - 3) = 0x73; // jae 182 *(call_target_address - 3) = 0x73; // jae
138 *(call_target_address - 2) = 0x05; // offset 183 *(call_target_address - 2) = 0x07; // offset
139 rinfo->set_target_address(check_code->entry()); 184 Assembler::set_target_address_at(call_target_address,
185 check_code->entry());
140 } 186 }
141 187
142 188
143 static int LookupBailoutId(DeoptimizationInputData* data, unsigned ast_id) { 189 static int LookupBailoutId(DeoptimizationInputData* data, unsigned ast_id) {
144 ByteArray* translations = data->TranslationByteArray(); 190 ByteArray* translations = data->TranslationByteArray();
145 int length = data->DeoptCount(); 191 int length = data->DeoptCount();
146 for (int i = 0; i < length; i++) { 192 for (int i = 0; i < length; i++) {
147 if (static_cast<unsigned>(data->AstId(i)->value()) == ast_id) { 193 if (static_cast<unsigned>(data->AstId(i)->value()) == ast_id) {
148 TranslationIterator it(translations, data->TranslationIndex(i)->value()); 194 TranslationIterator it(translations, data->TranslationIndex(i)->value());
149 int value = it.Next(); 195 int value = it.Next();
(...skipping 343 matching lines...) Expand 10 before | Expand all | Expand 10 after
493 __ mov(Operand(esp, 2 * kPointerSize), ebx); // Bailout id. 539 __ mov(Operand(esp, 2 * kPointerSize), ebx); // Bailout id.
494 __ mov(Operand(esp, 3 * kPointerSize), ecx); // Code address or 0. 540 __ mov(Operand(esp, 3 * kPointerSize), ecx); // Code address or 0.
495 __ mov(Operand(esp, 4 * kPointerSize), edx); // Fp-to-sp delta. 541 __ mov(Operand(esp, 4 * kPointerSize), edx); // Fp-to-sp delta.
496 __ CallCFunction(ExternalReference::new_deoptimizer_function(), 5); 542 __ CallCFunction(ExternalReference::new_deoptimizer_function(), 5);
497 543
498 // Preserve deoptimizer object in register eax and get the input 544 // Preserve deoptimizer object in register eax and get the input
499 // frame descriptor pointer. 545 // frame descriptor pointer.
500 __ mov(ebx, Operand(eax, Deoptimizer::input_offset())); 546 __ mov(ebx, Operand(eax, Deoptimizer::input_offset()));
501 547
502 // Fill in the input registers. 548 // Fill in the input registers.
503 for (int i = 0; i < kNumberOfRegisters; i++) { 549 for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
504 int offset = (i * kIntSize) + FrameDescription::registers_offset(); 550 int offset = (i * kPointerSize) + FrameDescription::registers_offset();
505 __ mov(ecx, Operand(esp, (kNumberOfRegisters - 1 - i) * kPointerSize)); 551 __ pop(Operand(ebx, offset));
506 __ mov(Operand(ebx, offset), ecx);
507 } 552 }
508 553
509 // Fill in the double input registers. 554 // Fill in the double input registers.
510 int double_regs_offset = FrameDescription::double_registers_offset(); 555 int double_regs_offset = FrameDescription::double_registers_offset();
511 for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) { 556 for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
512 int dst_offset = i * kDoubleSize + double_regs_offset; 557 int dst_offset = i * kDoubleSize + double_regs_offset;
513 int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize; 558 int src_offset = i * kDoubleSize;
514 __ movdbl(xmm0, Operand(esp, src_offset)); 559 __ movdbl(xmm0, Operand(esp, src_offset));
515 __ movdbl(Operand(ebx, dst_offset), xmm0); 560 __ movdbl(Operand(ebx, dst_offset), xmm0);
516 } 561 }
517 562
518 // Remove the bailout id and the general purpose registers from the stack. 563 // Remove the bailout id and the double registers from the stack.
519 if (type() == EAGER) { 564 if (type() == EAGER) {
520 __ add(Operand(esp), Immediate(kSavedRegistersAreaSize + kPointerSize)); 565 __ add(Operand(esp), Immediate(kDoubleRegsSize + kPointerSize));
521 } else { 566 } else {
522 __ add(Operand(esp), Immediate(kSavedRegistersAreaSize + 2 * kPointerSize)); 567 __ add(Operand(esp), Immediate(kDoubleRegsSize + 2 * kPointerSize));
523 } 568 }
524 569
525 // Compute a pointer to the unwinding limit in register ecx; that is 570 // Compute a pointer to the unwinding limit in register ecx; that is
526 // the first stack slot not part of the input frame. 571 // the first stack slot not part of the input frame.
527 __ mov(ecx, Operand(ebx, FrameDescription::frame_size_offset())); 572 __ mov(ecx, Operand(ebx, FrameDescription::frame_size_offset()));
528 __ add(ecx, Operand(esp)); 573 __ add(ecx, Operand(esp));
529 574
530 // Unwind the stack down to - but not including - the unwinding 575 // Unwind the stack down to - but not including - the unwinding
531 // limit and copy the contents of the activation frame to the input 576 // limit and copy the contents of the activation frame to the input
532 // frame description. 577 // frame description.
(...skipping 44 matching lines...) Expand 10 before | Expand all | Expand 10 after
577 // Push state, pc, and continuation from the last output frame. 622 // Push state, pc, and continuation from the last output frame.
578 if (type() != OSR) { 623 if (type() != OSR) {
579 __ push(Operand(ebx, FrameDescription::state_offset())); 624 __ push(Operand(ebx, FrameDescription::state_offset()));
580 } 625 }
581 __ push(Operand(ebx, FrameDescription::pc_offset())); 626 __ push(Operand(ebx, FrameDescription::pc_offset()));
582 __ push(Operand(ebx, FrameDescription::continuation_offset())); 627 __ push(Operand(ebx, FrameDescription::continuation_offset()));
583 628
584 629
585 // Push the registers from the last output frame. 630 // Push the registers from the last output frame.
586 for (int i = 0; i < kNumberOfRegisters; i++) { 631 for (int i = 0; i < kNumberOfRegisters; i++) {
587 int offset = (i * kIntSize) + FrameDescription::registers_offset(); 632 int offset = (i * kPointerSize) + FrameDescription::registers_offset();
588 __ push(Operand(ebx, offset)); 633 __ push(Operand(ebx, offset));
589 } 634 }
590 635
591 // Restore the registers from the stack. 636 // Restore the registers from the stack.
592 __ popad(); 637 __ popad();
593 638
594 // Return to the continuation point. 639 // Return to the continuation point.
595 __ ret(0); 640 __ ret(0);
596 } 641 }
597 642
598 643
599 void Deoptimizer::TableEntryGenerator::GeneratePrologue() { 644 void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
600 // Create a sequence of deoptimization entries. 645 // Create a sequence of deoptimization entries.
601 Label done; 646 Label done;
602 for (int i = 0; i < count(); i++) { 647 for (int i = 0; i < count(); i++) {
603 int start = masm()->pc_offset(); 648 int start = masm()->pc_offset();
604 USE(start); 649 USE(start);
605 __ push_imm32(i); 650 __ push_imm32(i);
606 __ jmp(&done); 651 __ jmp(&done);
607 ASSERT(masm()->pc_offset() - start == table_entry_size_); 652 ASSERT(masm()->pc_offset() - start == table_entry_size_);
608 } 653 }
609 __ bind(&done); 654 __ bind(&done);
610 } 655 }
611 656
612 #undef __ 657 #undef __
613 658
614 659
615 } } // namespace v8::internal 660 } } // namespace v8::internal
661
662 #endif // V8_TARGET_ARCH_IA32
OLDNEW
« no previous file with comments | « src/ia32/codegen-ia32.cc ('k') | src/ia32/disasm-ia32.cc » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698