OLD | NEW |
1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 24 matching lines...) |
35 #include "safepoint-table.h" | 35 #include "safepoint-table.h" |
36 | 36 |
37 namespace v8 { | 37 namespace v8 { |
38 namespace internal { | 38 namespace internal { |
39 | 39 |
40 | 40 |
41 const int Deoptimizer::table_entry_size_ = 10; | 41 const int Deoptimizer::table_entry_size_ = 10; |
42 | 42 |
43 | 43 |
44 int Deoptimizer::patch_size() { | 44 int Deoptimizer::patch_size() { |
45 return MacroAssembler::kCallInstructionLength; | 45 return Assembler::kCallInstructionLength; |
46 } | |
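Note: the patch size now comes from Assembler rather than MacroAssembler, but either way it must cover the full long-call sequence that CodePatcher writes. A rough sketch of that sequence on x64, assuming the usual scratch-register expansion (the exact byte count is the assembler's constant, not guaranteed here):

    // movq r10, <64-bit deopt entry address>   ; REX.W B8+r imm64, 10 bytes
    // call r10                                 ; 41 FF D2, 3 bytes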
47 | |
48 | |
49 #ifdef DEBUG | |
50 // Overwrites code with int3 instructions. | |
51 static void ZapCodeRange(Address from, Address to) { | |
52 CHECK(from <= to); | |
53 int length = static_cast<int>(to - from); | |
54 CodePatcher destroyer(from, length); | |
55 while (length-- > 0) { | |
56 destroyer.masm()->int3(); | |
57 } | |
58 } | |
59 #endif | |
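Aside: int3 is the one-byte breakpoint opcode 0xCC, so a zapped range ends up as

    // CC CC CC ... CC   ; one int3 per byte

and any stray jump into it traps immediately in debug builds instead of executing stale code.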
60 | |
61 | |
62 // Iterate through the entries of a SafepointTable that correspond to |
63 // deoptimization points. |
64 class SafepointTableDeoptimizationEntryIterator { |
65 public: |
66 explicit SafepointTableDeoptimizationEntryIterator(Code* code) |
67 : code_(code), table_(code), index_(-1), limit_(table_.length()) { | |
68 FindNextIndex(); | |
69 } | |
70 | |
71 SafepointEntry Next(Address* pc) { | |
72 if (index_ >= limit_) { | |
73 *pc = NULL; | |
74 return SafepointEntry(); // Invalid entry. | |
75 } | |
76 *pc = code_->instruction_start() + table_.GetPcOffset(index_); | |
77 SafepointEntry entry = table_.GetEntry(index_); | |
78 FindNextIndex(); | |
79 return entry; | |
80 } | |
81 | |
82 private: | |
83 void FindNextIndex() { | |
84 ASSERT(index_ < limit_); | |
85 while (++index_ < limit_) { | |
86 if (table_.GetEntry(index_).deoptimization_index() != | |
87 Safepoint::kNoDeoptimizationIndex) { | |
88 return; | |
89 } | |
90 } | |
91 } | |
92 | |
93 Code* code_; | |
94 SafepointTable table_; | |
95 // Index of next deoptimization entry. If it has reached limit_ after |
96 // calling FindNextIndex, there are no more, and Next will return an |
97 // invalid SafepointEntry. |
98 int index_; | |
99 // Table length. | |
100 int limit_; | |
101 }; | |
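A minimal usage sketch of this iterator (hypothetical, but it mirrors the loop in DeoptimizeFunction below):

    SafepointTableDeoptimizationEntryIterator it(code);
    Address pc = NULL;
    SafepointEntry entry = it.Next(&pc);
    while (entry.is_valid()) {
      // pc is the return address of a safepoint; entry.deoptimization_index()
      // picks the matching lazy deoptimization entry.
      entry = it.Next(&pc);
    }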
102 | |
103 | |
104 void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) { | |
105 // TODO(1276): Implement. | |
106 } | 46 } |
107 | 47 |
108 | 48 |
109 void Deoptimizer::DeoptimizeFunction(JSFunction* function) { | 49 void Deoptimizer::DeoptimizeFunction(JSFunction* function) { |
110 HandleScope scope; | 50 HandleScope scope; |
111 AssertNoAllocation no_allocation; | 51 AssertNoAllocation no_allocation; |
112 | 52 |
113 if (!function->IsOptimized()) return; | 53 if (!function->IsOptimized()) return; |
114 | 54 |
115 // Get the optimized code. | 55 // Get the optimized code. |
116 Code* code = function->code(); | 56 Code* code = function->code(); |
117 | 57 |
118 // Invalidate the relocation information, as the code patching below | 58 // Invalidate the relocation information, as the code patching below |
119 // will make it invalid, and it is not needed any more. | 59 // will make it invalid, and it is not needed any more. |
120 code->InvalidateRelocation(); | 60 code->InvalidateRelocation(); |
121 | 61 |
122 // For each return after a safepoint insert an absolute call to the | 62 // For each LLazyBailout instruction insert an absolute call to the |
123 // corresponding deoptimization entry, or a short call to an absolute | 63 // corresponding deoptimization entry, or a short call to an absolute |
124 // jump if space is short. The absolute jumps are put in a table just | 64 // jump if space is short. The absolute jumps are put in a table just |
125 // before the safepoint table (space was allocated there when the Code | 65 // before the safepoint table (space was allocated there when the Code |
126 // object was created, if necessary). | 66 // object was created, if necessary). |
127 | 67 |
128 Address instruction_start = function->code()->instruction_start(); | 68 Address instruction_start = function->code()->instruction_start(); |
129 Address jump_table_address = | |
130 instruction_start + function->code()->safepoint_table_offset(); | |
131 #ifdef DEBUG | 69 #ifdef DEBUG |
132 Address previous_pc = instruction_start; | 70 Address prev_call_address = NULL; |
133 #endif | 71 #endif |
134 | 72 DeoptimizationInputData* deopt_data = |
135 SafepointTableDeoptimizationEntryIterator deoptimizations(function->code()); | 73 DeoptimizationInputData::cast(code->deoptimization_data()); |
136 Address entry_pc = NULL; | 74 for (int i = 0; i < deopt_data->DeoptCount(); i++) { |
137 | 75 if (deopt_data->Pc(i)->value() == -1) continue; |
138 SafepointEntry current_entry = deoptimizations.Next(&entry_pc); | 76 // Position where Call will be patched in. |
139 while (current_entry.is_valid()) { | 77 Address call_address = instruction_start + deopt_data->Pc(i)->value(); |
140 int gap_code_size = current_entry.gap_code_size(); | 78 // There is room enough to write a long call instruction because we pad |
141 unsigned deoptimization_index = current_entry.deoptimization_index(); | 79 // LLazyBailout instructions with nops if necessary. |
142 | 80 CodePatcher patcher(call_address, Assembler::kCallInstructionLength); |
| 81 patcher.masm()->Call(GetDeoptimizationEntry(i, LAZY), RelocInfo::NONE); |
| 82 ASSERT(prev_call_address == NULL || |
| 83 call_address >= prev_call_address + patch_size()); |
| 84 ASSERT(call_address + patch_size() <= code->instruction_end()); |
143 #ifdef DEBUG | 85 #ifdef DEBUG |
144 // Destroy the code which is not supposed to run again. | 86 prev_call_address = call_address; |
145 ZapCodeRange(previous_pc, entry_pc); | |
146 #endif | 87 #endif |
147 // Position where Call will be patched in. | |
148 Address call_address = entry_pc + gap_code_size; | |
149 // End of call instruction, if using a direct call to a 64-bit address. | |
150 Address call_end_address = | |
151 call_address + MacroAssembler::kCallInstructionLength; | |
152 | |
153 // Find next deoptimization entry, if any. | |
154 Address next_pc = NULL; | |
155 SafepointEntry next_entry = deoptimizations.Next(&next_pc); | |
156 | |
157 if (!next_entry.is_valid() || next_pc >= call_end_address) { | |
158 // Room enough to write a long call instruction. | |
159 CodePatcher patcher(call_address, Assembler::kCallInstructionLength); | |
160 patcher.masm()->Call(GetDeoptimizationEntry(deoptimization_index, LAZY), | |
161 RelocInfo::NONE); | |
162 #ifdef DEBUG | |
163 previous_pc = call_end_address; | |
164 #endif | |
165 } else { | |
166 // Not enough room for a long Call instruction. Write a short call |
167 // instruction to a long jump placed elsewhere in the code. | |
168 #ifdef DEBUG | |
169 Address short_call_end_address = | |
170 call_address + MacroAssembler::kShortCallInstructionLength; | |
171 #endif | |
172 ASSERT(next_pc >= short_call_end_address); | |
173 | |
174 // Write jump in jump-table. | |
175 jump_table_address -= MacroAssembler::kJumpInstructionLength; | |
176 CodePatcher jump_patcher(jump_table_address, | |
177 MacroAssembler::kJumpInstructionLength); | |
178 jump_patcher.masm()->Jump( | |
179 GetDeoptimizationEntry(deoptimization_index, LAZY), | |
180 RelocInfo::NONE); | |
181 | |
182 // Write a call to the jump at call_address. |
183 CodePatcher call_patcher(call_address, | |
184 MacroAssembler::kShortCallInstructionLength); | |
185 call_patcher.masm()->call(jump_table_address); | |
186 #ifdef DEBUG | |
187 previous_pc = short_call_end_address; | |
188 #endif | |
189 } | |
190 | |
191 // Continue with next deoptimization entry. | |
192 current_entry = next_entry; | |
193 entry_pc = next_pc; | |
194 } | 88 } |
195 | 89 |
196 #ifdef DEBUG | |
197 // Destroy the code which is not supposed to run again. | |
198 ZapCodeRange(previous_pc, jump_table_address); | |
199 #endif | |
200 Isolate* isolate = code->GetIsolate(); | 90 Isolate* isolate = code->GetIsolate(); |
201 | 91 |
202 // Add the deoptimizing code to the list. | 92 // Add the deoptimizing code to the list. |
203 DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code); | 93 DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code); |
204 DeoptimizerData* data = isolate->deoptimizer_data(); | 94 DeoptimizerData* data = isolate->deoptimizer_data(); |
205 node->set_next(data->deoptimizing_code_list_); | 95 node->set_next(data->deoptimizing_code_list_); |
206 data->deoptimizing_code_list_ = node; | 96 data->deoptimizing_code_list_ = node; |
207 | 97 |
208 // We might be in the middle of incremental marking with compaction. | 98 // We might be in the middle of incremental marking with compaction. |
209 // Tell collector to treat this code object in a special way and | 99 // Tell collector to treat this code object in a special way and |
210 // ignore all slots that might have been recorded on it. | 100 // ignore all slots that might have been recorded on it. |
211 isolate->heap()->mark_compact_collector()->InvalidateCode(code); | 101 isolate->heap()->mark_compact_collector()->InvalidateCode(code); |
212 | 102 |
213 // Set the code for the function to non-optimized version. | 103 // Set the code for the function to non-optimized version. |
214 function->ReplaceCode(function->shared()->code()); | 104 function->ReplaceCode(function->shared()->code()); |
215 | 105 |
216 if (FLAG_trace_deopt) { | 106 if (FLAG_trace_deopt) { |
217 PrintF("[forced deoptimization: "); | 107 PrintF("[forced deoptimization: "); |
218 function->PrintName(); | 108 function->PrintName(); |
219 PrintF(" / %" V8PRIxPTR "]\n", reinterpret_cast<intptr_t>(function)); | 109 PrintF(" / %" V8PRIxPTR "]\n", reinterpret_cast<intptr_t>(function)); |
220 #ifdef DEBUG | |
221 if (FLAG_print_code) { | |
222 code->PrintLn(); | |
223 } | |
224 #endif | |
225 } | 110 } |
226 } | 111 } |
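The new loop drops the jump-table fallback entirely: every LLazyBailout site is recorded in DeoptimizationInputData (Pc(i), with -1 for entries that never lazily deopt) and is padded with nops at code-generation time, so a full long call always fits. Sketched effect on one patched site, assuming the same scratch-register call expansion as above:

    // before: nop nop ... nop                        ; >= patch_size() bytes
    // after:  movq r10, <GetDeoptimizationEntry(i, LAZY)>
    //         call r10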
227 | 112 |
228 | 113 |
229 void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code, | 114 void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code, |
230 Address pc_after, | 115 Address pc_after, |
231 Code* check_code, | 116 Code* check_code, |
232 Code* replacement_code) { | 117 Code* replacement_code) { |
233 Address call_target_address = pc_after - kIntSize; | 118 Address call_target_address = pc_after - kIntSize; |
234 ASSERT(check_code->entry() == | 119 ASSERT(check_code->entry() == |
(...skipping 623 matching lines...) |
858 } | 743 } |
859 __ bind(&done); | 744 __ bind(&done); |
860 } | 745 } |
861 | 746 |
862 #undef __ | 747 #undef __ |
863 | 748 |
864 | 749 |
865 } } // namespace v8::internal | 750 } } // namespace v8::internal |
866 | 751 |
867 #endif // V8_TARGET_ARCH_X64 | 752 #endif // V8_TARGET_ARCH_X64 |