| OLD | NEW |
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 24 matching lines...) |
| 35 #include "safepoint-table.h" | 35 #include "safepoint-table.h" |
| 36 | 36 |
| 37 namespace v8 { | 37 namespace v8 { |
| 38 namespace internal { | 38 namespace internal { |
| 39 | 39 |
| 40 | 40 |
| 41 const int Deoptimizer::table_entry_size_ = 10; | 41 const int Deoptimizer::table_entry_size_ = 10; |
| 42 | 42 |
| 43 | 43 |
| 44 int Deoptimizer::patch_size() { | 44 int Deoptimizer::patch_size() { |
| 45 return MacroAssembler::kCallInstructionLength; | 45 return Assembler::kCallInstructionLength; |
| 46 } | |
| 47 | |
| 48 | |
| 49 #ifdef DEBUG | |
| 50 // Overwrites code with int3 instructions. | |
| 51 static void ZapCodeRange(Address from, Address to) { | |
| 52 CHECK(from <= to); | |
| 53 int length = static_cast<int>(to - from); | |
| 54 CodePatcher destroyer(from, length); | |
| 55 while (length-- > 0) { | |
| 56 destroyer.masm()->int3(); | |
| 57 } | |
| 58 } | |
| 59 #endif | |
| 60 | |
| 61 | |
| 62 // Iterate through the entries of a SafepointTable that correspond to |
| 63 // deoptimization points. | |
| 64 class SafepointTableDeoptimizationEntryIterator { |
| 65 public: | |
| 66 explicit SafepointTableDeoptimizationEntryIterator(Code* code) |
| 67 : code_(code), table_(code), index_(-1), limit_(table_.length()) { | |
| 68 FindNextIndex(); | |
| 69 } | |
| 70 | |
| 71 SafepointEntry Next(Address* pc) { | |
| 72 if (index_ >= limit_) { | |
| 73 *pc = NULL; | |
| 74 return SafepointEntry(); // Invalid entry. | |
| 75 } | |
| 76 *pc = code_->instruction_start() + table_.GetPcOffset(index_); | |
| 77 SafepointEntry entry = table_.GetEntry(index_); | |
| 78 FindNextIndex(); | |
| 79 return entry; | |
| 80 } | |
| 81 | |
| 82 private: | |
| 83 void FindNextIndex() { | |
| 84 ASSERT(index_ < limit_); | |
| 85 while (++index_ < limit_) { | |
| 86 if (table_.GetEntry(index_).deoptimization_index() != | |
| 87 Safepoint::kNoDeoptimizationIndex) { | |
| 88 return; | |
| 89 } | |
| 90 } | |
| 91 } | |
| 92 | |
| 93 Code* code_; | |
| 94 SafepointTable table_; | |
| 95 // Index of next deoptimization entry. If equal to limit_ after calling |
| 96 // FindNextIndex, there are no more, and Next will return an invalid | |
| 97 // SafepointEntry. | |
| 98 int index_; | |
| 99 // Table length. | |
| 100 int limit_; | |
| 101 }; | |
| 102 | |
| 103 | |
| 104 void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) { | |
| 105 // TODO(1276): Implement. | |
| 106 } | 46 } |
| 107 | 47 |
| 108 | 48 |
| 109 void Deoptimizer::DeoptimizeFunction(JSFunction* function) { | 49 void Deoptimizer::DeoptimizeFunction(JSFunction* function) { |
| 110 HandleScope scope; | 50 HandleScope scope; |
| 111 AssertNoAllocation no_allocation; | 51 AssertNoAllocation no_allocation; |
| 112 | 52 |
| 113 if (!function->IsOptimized()) return; | 53 if (!function->IsOptimized()) return; |
| 114 | 54 |
| 115 // Get the optimized code. | 55 // Get the optimized code. |
| 116 Code* code = function->code(); | 56 Code* code = function->code(); |
| 117 | 57 |
| 118 // Invalidate the relocation information, as it will become invalid by the | 58 // Invalidate the relocation information, as it will become invalid by the |
| 119 // code patching below, and is not needed any more. | 59 // code patching below, and is not needed any more. |
| 120 code->InvalidateRelocation(); | 60 code->InvalidateRelocation(); |
| 121 | 61 |
| 122 // For each return after a safepoint insert an absolute call to the | 62 // For each LLazyBailout instruction insert an absolute call to the |
| 123 // corresponding deoptimization entry, or a short call to an absolute | 63 // corresponding deoptimization entry, or a short call to an absolute |
| 124 // jump if space is short. The absolute jumps are put in a table just | 64 // jump if space is short. The absolute jumps are put in a table just |
| 125 // before the safepoint table (space was allocated there when the Code | 65 // before the safepoint table (space was allocated there when the Code |
| 126 // object was created, if necessary). | 66 // object was created, if necessary). |
| 127 | 67 |
| 128 Address instruction_start = function->code()->instruction_start(); | 68 Address instruction_start = function->code()->instruction_start(); |
| 129 Address jump_table_address = | 69 DeoptimizationInputData* deopt_data = |
| 130 instruction_start + function->code()->safepoint_table_offset(); | 70 DeoptimizationInputData::cast(code->deoptimization_data()); |
| 131 #ifdef DEBUG | 71 for (int i = 0; i < deopt_data->DeoptCount(); i++) { |
| 132 Address previous_pc = instruction_start; | 72 if (deopt_data->Pc(i)->value() == -1) continue; |
| 133 #endif | |
| 134 | |
| 135 SafepointTableDeoptimizationEntryIterator deoptimizations(function->code()); |
| 136 Address entry_pc = NULL; | |
| 137 | |
| 138 SafepointEntry current_entry = deoptimizations.Next(&entry_pc); | |
| 139 while (current_entry.is_valid()) { | |
| 140 int gap_code_size = current_entry.gap_code_size(); | |
| 141 unsigned deoptimization_index = current_entry.deoptimization_index(); | |
| 142 | |
| 143 #ifdef DEBUG | |
| 144 // Destroy the code which is not supposed to run again. | |
| 145 ZapCodeRange(previous_pc, entry_pc); | |
| 146 #endif | |
| 147 // Position where Call will be patched in. | 73 // Position where Call will be patched in. |
| 148 Address call_address = entry_pc + gap_code_size; | 74 Address call_address = instruction_start + deopt_data->Pc(i)->value(); |
| 149 // End of call instruction, if using a direct call to a 64-bit address. | 75 // There is room enough to write a long call instruction because we pad |
| 150 Address call_end_address = | 76 // LLazyBailout instructions with nops if necessary. |
| 151 call_address + MacroAssembler::kCallInstructionLength; | |
| 152 | |
| 153 // Find next deoptimization entry, if any. | |
| 154 Address next_pc = NULL; | |
| 155 SafepointEntry next_entry = deoptimizations.Next(&next_pc); | |
| 156 | |
| 157 if (!next_entry.is_valid() || next_pc >= call_end_address) { | |
| 158 // Room enough to write a long call instruction. | |
| 159 CodePatcher patcher(call_address, Assembler::kCallInstructionLength); | 77 CodePatcher patcher(call_address, Assembler::kCallInstructionLength); |
| 160 patcher.masm()->Call(GetDeoptimizationEntry(deoptimization_index, LAZY), | 78 patcher.masm()->Call(GetDeoptimizationEntry(i, LAZY), RelocInfo::NONE); |
| 161 RelocInfo::NONE); | |
| 162 #ifdef DEBUG | |
| 163 previous_pc = call_end_address; | |
| 164 #endif | |
| 165 } else { | |
| 166 // Not enough room for a long Call instruction. Write a short call |
| 167 // instruction to a long jump placed elsewhere in the code. | |
| 168 #ifdef DEBUG | |
| 169 Address short_call_end_address = | |
| 170 call_address + MacroAssembler::kShortCallInstructionLength; | |
| 171 #endif | |
| 172 ASSERT(next_pc >= short_call_end_address); | |
| 173 | |
| 174 // Write jump in jump-table. | |
| 175 jump_table_address -= MacroAssembler::kJumpInstructionLength; | |
| 176 CodePatcher jump_patcher(jump_table_address, | |
| 177 MacroAssembler::kJumpInstructionLength); | |
| 178 jump_patcher.masm()->Jump( | |
| 179 GetDeoptimizationEntry(deoptimization_index, LAZY), | |
| 180 RelocInfo::NONE); | |
| 181 | |
| 182 // Write call to jump at call_address. |
| 183 CodePatcher call_patcher(call_address, | |
| 184 MacroAssembler::kShortCallInstructionLength); | |
| 185 call_patcher.masm()->call(jump_table_address); | |
| 186 #ifdef DEBUG | |
| 187 previous_pc = short_call_end_address; | |
| 188 #endif | |
| 189 } | |
| 190 | |
| 191 // Continue with next deoptimization entry. | |
| 192 current_entry = next_entry; | |
| 193 entry_pc = next_pc; | |
| 194 } | 79 } |
| 195 | 80 |
| 196 #ifdef DEBUG | |
| 197 // Destroy the code which is not supposed to run again. | |
| 198 ZapCodeRange(previous_pc, jump_table_address); | |
| 199 #endif | |
| 200 Isolate* isolate = code->GetIsolate(); | 81 Isolate* isolate = code->GetIsolate(); |
| 201 | 82 |
| 202 // Add the deoptimizing code to the list. | 83 // Add the deoptimizing code to the list. |
| 203 DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code); | 84 DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code); |
| 204 DeoptimizerData* data = isolate->deoptimizer_data(); | 85 DeoptimizerData* data = isolate->deoptimizer_data(); |
| 205 node->set_next(data->deoptimizing_code_list_); | 86 node->set_next(data->deoptimizing_code_list_); |
| 206 data->deoptimizing_code_list_ = node; | 87 data->deoptimizing_code_list_ = node; |
| 207 | 88 |
| 208 // We might be in the middle of incremental marking with compaction. | 89 // We might be in the middle of incremental marking with compaction. |
| 209 // Tell collector to treat this code object in a special way and | 90 // Tell collector to treat this code object in a special way and |
| 210 // ignore all slots that might have been recorded on it. | 91 // ignore all slots that might have been recorded on it. |
| 211 isolate->heap()->mark_compact_collector()->InvalidateCode(code); | 92 isolate->heap()->mark_compact_collector()->InvalidateCode(code); |
| 212 | 93 |
| 213 // Set the code for the function to the non-optimized version. | 94 // Set the code for the function to the non-optimized version. |
| 214 function->ReplaceCode(function->shared()->code()); | 95 function->ReplaceCode(function->shared()->code()); |
| 215 | 96 |
| 216 if (FLAG_trace_deopt) { | 97 if (FLAG_trace_deopt) { |
| 217 PrintF("[forced deoptimization: "); | 98 PrintF("[forced deoptimization: "); |
| 218 function->PrintName(); | 99 function->PrintName(); |
| 219 PrintF(" / %" V8PRIxPTR "]\n", reinterpret_cast<intptr_t>(function)); | 100 PrintF(" / %" V8PRIxPTR "]\n", reinterpret_cast<intptr_t>(function)); |
| 220 #ifdef DEBUG | |
| 221 if (FLAG_print_code) { | |
| 222 code->PrintLn(); | |
| 223 } | |
| 224 #endif | |
| 225 } | 101 } |
| 226 } | 102 } |
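
To make the new patching loop concrete, the sketch below simulates the scheme outside of V8: every recorded lazy-bailout pc offset gets a 13-byte absolute call, movq r10, imm64 followed by call r10, which is the sequence the x64 Call macro emits under the assumption that Assembler::kCallInstructionLength is 13 on this port. The sites can be overwritten blindly because, per the comment in the loop, LLazyBailout instructions are padded with nops. DeoptPoint, PatchLazyDeoptSites, and the buffer setup are illustrative names for this sketch, not V8 API.

#include <cassert>
#include <cstdint>
#include <cstdio>
#include <cstring>
#include <vector>

// Assumed size of the patched x64 call sequence (movq r10, imm64; call r10).
static const int kCallInstructionLength = 13;

// One entry of the deoptimization input data: pc offset of the lazy bailout
// site (-1 if the entry was never emitted) and the address of the
// corresponding lazy deoptimization entry.
struct DeoptPoint {
  int pc_offset;
  uint64_t entry;
};

// Overwrite each recorded bailout site with an absolute call. The sites are
// assumed to be nop-padded, so the 13-byte sequence always fits.
void PatchLazyDeoptSites(std::vector<uint8_t>& code,
                         const std::vector<DeoptPoint>& points) {
  for (size_t i = 0; i < points.size(); i++) {
    if (points[i].pc_offset == -1) continue;  // Entry was never emitted.
    assert(points[i].pc_offset + kCallInstructionLength <=
           static_cast<int>(code.size()));
    uint8_t* site = &code[points[i].pc_offset];
    site[0] = 0x49;  // REX.WB prefix for movq r10, imm64.
    site[1] = 0xBA;  // Opcode B8+r with r = r10.
    std::memcpy(site + 2, &points[i].entry, sizeof(points[i].entry));
    site[10] = 0x41;  // REX.B prefix for call r10.
    site[11] = 0xFF;  // Opcode FF /2 (indirect call).
    site[12] = 0xD2;  // ModRM selecting r10.
  }
}

int main() {
  std::vector<uint8_t> code(64, 0x90);  // Pretend code object: all nops.
  std::vector<DeoptPoint> points;
  points.push_back(DeoptPoint{8, 0x1122334455667788ULL});
  points.push_back(DeoptPoint{-1, 0});  // Skipped, like Pc(i)->value() == -1.
  PatchLazyDeoptSites(code, points);
  std::printf("byte at offset 8: 0x%02x\n", code[8]);  // Prints 0x49.
  return 0;
}

The pc_offset == -1 guard mirrors the deopt_data->Pc(i)->value() == -1 check in the loop above, which skips deoptimization entries that were never materialized in the generated code.
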
| 227 | 103 |
| 228 | 104 |
| 229 void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code, | 105 void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code, |
| 230 Address pc_after, | 106 Address pc_after, |
| 231 Code* check_code, | 107 Code* check_code, |
| 232 Code* replacement_code) { | 108 Code* replacement_code) { |
| 233 Address call_target_address = pc_after - kIntSize; | 109 Address call_target_address = pc_after - kIntSize; |
| 234 ASSERT(check_code->entry() == | 110 ASSERT(check_code->entry() == |
| (...skipping 623 matching lines...) |
| 858 } | 734 } |
| 859 __ bind(&done); | 735 __ bind(&done); |
| 860 } | 736 } |
| 861 | 737 |
| 862 #undef __ | 738 #undef __ |
| 863 | 739 |
| 864 | 740 |
| 865 } } // namespace v8::internal | 741 } } // namespace v8::internal |
| 866 | 742 |
| 867 #endif // V8_TARGET_ARCH_X64 | 743 #endif // V8_TARGET_ARCH_X64 |
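
One note on the truncated PatchStackCheckCodeAt: the key line is call_target_address = pc_after - kIntSize. The stack check ends in a call rel32, so its 32-bit operand occupies the four bytes just before pc_after and is encoded relative to the end of the call instruction. Below is a minimal sketch of that displacement arithmetic; RetargetCall is an illustrative helper, not V8 API.

#include <cstdint>
#include <cstring>

const int kIntSize = 4;  // Size of the rel32 operand of a call.

// Point the "call rel32" ending at pc_after at new_target. The displacement
// is measured from the end of the call instruction, which is exactly
// pc_after, so rel32 = new_target - pc_after.
void RetargetCall(uint8_t* pc_after, const uint8_t* new_target) {
  uint8_t* call_target_address = pc_after - kIntSize;
  int32_t displacement = static_cast<int32_t>(new_target - pc_after);
  std::memcpy(call_target_address, &displacement, kIntSize);
}
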