Index: src/x64/deoptimizer-x64.cc |
diff --git a/src/x64/deoptimizer-x64.cc b/src/x64/deoptimizer-x64.cc |
index ee70785f2673811244391599c93547f95bf80383..9581ea974cbf6297aa1fbb31f43b89a0ac6ea7ef 100644 |
--- a/src/x64/deoptimizer-x64.cc |
+++ b/src/x64/deoptimizer-x64.cc |
@@ -42,10 +42,62 @@ int Deoptimizer::table_entry_size_ = 10; |
int Deoptimizer::patch_size() { |
- return Assembler::kCallInstructionLength; |
+ return MacroAssembler::kCallInstructionLength; |
} |
+#ifdef DEBUG |
+// Overwrites code with int3 instructions. |
+static void ZapInstructions(Code* code, unsigned from_offset, unsigned length) { |
Kevin Millikin (Chromium)
2011/02/04 11:34:54
A similar function is called ZapCodeRange on IA32.
Lasse Reichstein
2011/02/04 12:32:13
Renamed.
|
+ CodePatcher destroyer(code->instruction_start() + from_offset, length); |
+ while (length-- > 0) { |
+ destroyer.masm()->int3(); |
+ } |
+} |
+#endif |
+ |
+ |
+// Iterate through the entries of a SafepointTable that corresponds to |
+// deoptimization points. |
+class SafepointTableDeoptimiztionEntryIterator { |
+ public: |
+ explicit SafepointTableDeoptimiztionEntryIterator(SafepointTable* table) |
+ : table_(table), index_(-1) { |
+ FindNextIndex(); |
+ } |
+ |
+ SafepointEntry Next(unsigned* pc_offset) { |
+ if (index_ < 0) { |
+ *pc_offset = 0; |
+ return SafepointEntry(); // Invalid entry. |
+ } |
+ *pc_offset = table_->GetPcOffset(index_); |
+ SafepointEntry entry = table_->GetEntry(index_); |
+ FindNextIndex(); |
+ return entry; |
+ } |
+ |
+ private: |
+ void FindNextIndex() { |
+ for (int i = index_ + 1, n = table_->length(); i < n; i++) { |
+ if (table_->GetEntry(i).deoptimization_index() != |
+ Safepoint::kNoDeoptimizationIndex) { |
+ index_ = i; |
+ return; |
+ } |
+ } |
+ // Mark as having no more deoptimization entries. |
+ index_ = -1; |
Kevin Millikin (Chromium)
2011/02/04 11:34:54
This is also the initial state. It seems like you
Lasse Reichstein
2011/02/04 12:32:13
Done.
|
+ } |
+ |
+ SafepointTable* table_; |
+ // Index of next deoptimization entry. If negative after calling |
+ // FindNextIndex, there are no more, and Next will return an invalid |
+ // SafepointEntry. |
+ int index_; |
+}; |
+ |
+ |
void Deoptimizer::DeoptimizeFunction(JSFunction* function) { |
AssertNoAllocation no_allocation; |
@@ -59,42 +111,77 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) { |
code->InvalidateRelocation(); |
 // For each return after a safepoint insert an absolute call to the |
- // corresponding deoptimization entry. |
- unsigned last_pc_offset = 0; |
+ // corresponding deoptimization entry, or a short call to an absolute |
+ // jump if space is short. The absolute jumps are put in a table just |
+ // before the safepoint table (space was allocated there when the Code |
+ // object was created, if necessary). |
+ unsigned jump_table_offset = function->code()->safepoint_table_offset(); |
+ unsigned previous_pc_offset = 0; |
SafepointTable table(function->code()); |
- for (unsigned i = 0; i < table.length(); i++) { |
- unsigned pc_offset = table.GetPcOffset(i); |
- SafepointEntry safepoint_entry = table.GetEntry(i); |
- int deoptimization_index = safepoint_entry.deoptimization_index(); |
- int gap_code_size = safepoint_entry.gap_code_size(); |
-#ifdef DEBUG |
- // Destroy the code which is not supposed to run again. |
- unsigned instructions = pc_offset - last_pc_offset; |
- CodePatcher destroyer(code->instruction_start() + last_pc_offset, |
- instructions); |
- for (unsigned i = 0; i < instructions; i++) { |
- destroyer.masm()->int3(); |
- } |
-#endif |
- last_pc_offset = pc_offset; |
- if (deoptimization_index != Safepoint::kNoDeoptimizationIndex) { |
- last_pc_offset += gap_code_size; |
- CodePatcher patcher(code->instruction_start() + last_pc_offset, |
- patch_size()); |
+ SafepointTableDeoptimiztionEntryIterator deoptimizations(&table); |
+ |
+ unsigned entry_pc_offset = 0; |
+ SafepointEntry current_entry = deoptimizations.Next(&entry_pc_offset); |
+ |
+ while (current_entry.is_valid()) { |
+ int gap_code_size = current_entry.gap_code_size(); |
+ unsigned deoptimization_index = current_entry.deoptimization_index(); |
+ |
+ #ifdef DEBUG |
Kevin Millikin (Chromium)
2011/02/04 11:34:54
We usually write #ifdef aligned at the left margin
Lasse Reichstein
2011/02/04 12:32:13
Ack. I don't know how this got indented. Probably
|
+ // Destroy the code which is not supposed to run again. |
+ CHECK(entry_pc_offset >= previous_pc_offset); |
+ ZapInstructions(code, previous_pc_offset, |
+ entry_pc_offset - previous_pc_offset); |
+ #endif |
+ // Position where Call will be patched in. |
+ unsigned call_offset = entry_pc_offset + gap_code_size; |
+ // End of call instruction, if using a direct call to a 64-bit address. |
+ unsigned call_end_offset = |
+ call_offset + MacroAssembler::kCallInstructionLength; |
+ |
+ // Find next deoptimization entry, if any. |
+ unsigned next_pc_offset = 0; |
+ SafepointEntry next_entry = deoptimizations.Next(&next_pc_offset); |
+ |
+ if (!next_entry.is_valid() || next_pc_offset >= call_end_offset) { |
Kevin Millikin (Chromium)
2011/02/04 11:34:54
If the next entry is not valid, I think you should
Lasse Reichstein
2011/02/04 12:32:13
I pad with space enough for one final call in all
|
+ // Room enough to write a long call instruction. |
+ CodePatcher patcher(code->instruction_start() + call_offset, |
+ Assembler::kCallInstructionLength); |
patcher.masm()->Call(GetDeoptimizationEntry(deoptimization_index, LAZY), |
- RelocInfo::NONE); |
- last_pc_offset += patch_size(); |
+ RelocInfo::NONE); |
Kevin Millikin (Chromium)
2011/02/04 11:34:54
Align with the other argument.
Lasse Reichstein
2011/02/04 12:32:13
Done.
|
+ previous_pc_offset = call_end_offset; |
+ } else { |
+ // Not room enough for a long Call instruction. Write a short call |
+ // instruction to a long jump placed elsewhere in the code. |
+ unsigned short_call_end_offset = |
+ call_offset + MacroAssembler::kShortCallInstructionLength; |
+ ASSERT(next_pc_offset >= short_call_end_offset); |
+ |
+ // Write jump in jump-table. |
+ jump_table_offset -= MacroAssembler::kJumpInstructionLength; |
+ CodePatcher jump_patcher(code->instruction_start() + jump_table_offset, |
+ MacroAssembler::kJumpInstructionLength); |
+ jump_patcher.masm()->Jump( |
+ GetDeoptimizationEntry(deoptimization_index, LAZY), |
+ RelocInfo::NONE); |
+ |
+ // Write call to jump at call_offset. |
+ CodePatcher call_patcher(code->instruction_start() + call_offset, |
+ MacroAssembler::kShortCallInstructionLength); |
+ call_patcher.masm()->call(code->instruction_start() + jump_table_offset); |
+ previous_pc_offset = short_call_end_offset; |
} |
Lasse Reichstein
2011/02/04 12:32:13
As discussed offline, I also rewrite to use addresses
|
+ |
+ // Continue with next deoptimization entry. |
+ current_entry = next_entry; |
+ entry_pc_offset = next_pc_offset; |
} |
+ |
#ifdef DEBUG |
// Destroy the code which is not supposed to run again. |
- CHECK(code->safepoint_table_offset() >= last_pc_offset); |
- unsigned instructions = code->safepoint_table_offset() - last_pc_offset; |
- CodePatcher destroyer(code->instruction_start() + last_pc_offset, |
- instructions); |
- for (unsigned i = 0; i < instructions; i++) { |
- destroyer.masm()->int3(); |
- } |
+ CHECK(jump_table_offset >= previous_pc_offset); |
+ ZapInstructions(code, previous_pc_offset, |
+ jump_table_offset - previous_pc_offset); |
#endif |
// Add the deoptimizing code to the list. |
@@ -390,7 +477,7 @@ void Deoptimizer::EntryGenerator::Generate() { |
__ pop(Operand(rbx, offset)); |
} |
- // Fill in the double input registers. |
+ // Fill in the double input registers. |
int double_regs_offset = FrameDescription::double_registers_offset(); |
for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; i++) { |
int dst_offset = i * kDoubleSize + double_regs_offset; |
@@ -404,7 +491,7 @@ void Deoptimizer::EntryGenerator::Generate() { |
__ addq(rsp, Immediate(2 * kPointerSize)); |
} |
- // Compute a pointer to the unwinding limit in register ecx; that is |
+ // Compute a pointer to the unwinding limit in register rcx; that is |
// the first stack slot not part of the input frame. |
__ movq(rcx, Operand(rbx, FrameDescription::frame_size_offset())); |
__ addq(rcx, rsp); |