Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(837)

Side by Side Diff: src/x64/deoptimizer-x64.cc

Issue 6347067: Fix potential overwriting of debug jumps of following code. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge/build-x64
Patch Set: Readded DoStoreGlobal code. Created 9 years, 10 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
OLDNEW
1 // Copyright 2011 the V8 project authors. All rights reserved. 1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 24 matching lines...) Expand all
35 #include "safepoint-table.h" 35 #include "safepoint-table.h"
36 36
37 namespace v8 { 37 namespace v8 {
38 namespace internal { 38 namespace internal {
39 39
40 40
41 int Deoptimizer::table_entry_size_ = 10; 41 int Deoptimizer::table_entry_size_ = 10;
42 42
43 43
44 int Deoptimizer::patch_size() { 44 int Deoptimizer::patch_size() {
45 return Assembler::kCallInstructionLength; 45 return MacroAssembler::kCallInstructionLength;
46 } 46 }
47 47
48 48
49 #ifdef DEBUG
50 // Overwrites code with int3 instructions.
51 static void ZapInstructions(Code* code, unsigned from_offset, unsigned length) {
Kevin Millikin (Chromium) 2011/02/04 11:34:54 A similar function is called ZapCodeRange on IA32.
Lasse Reichstein 2011/02/04 12:32:13 Renamed.
52 CodePatcher destroyer(code->instruction_start() + from_offset, length);
53 while (length-- > 0) {
54 destroyer.masm()->int3();
55 }
56 }
57 #endif
58
59
 60 // Iterate through the entries of a SafepointTable that correspond to
 61 // deoptimization points.
62 class SafepointTableDeoptimiztionEntryIterator {
63 public:
64 explicit SafepointTableDeoptimiztionEntryIterator(SafepointTable* table)
65 : table_(table), index_(-1) {
66 FindNextIndex();
67 }
68
69 SafepointEntry Next(unsigned* pc_offset) {
70 if (index_ < 0) {
71 *pc_offset = 0;
72 return SafepointEntry(); // Invalid entry.
73 }
74 *pc_offset = table_->GetPcOffset(index_);
75 SafepointEntry entry = table_->GetEntry(index_);
76 FindNextIndex();
77 return entry;
78 }
79
80 private:
81 void FindNextIndex() {
82 for (int i = index_ + 1, n = table_->length(); i < n; i++) {
83 if (table_->GetEntry(i).deoptimization_index() !=
84 Safepoint::kNoDeoptimizationIndex) {
85 index_ = i;
86 return;
87 }
88 }
89 // Mark as having no more deoptimization entries.
90 index_ = -1;
Kevin Millikin (Chromium) 2011/02/04 11:34:54 This is also the initial state. It seems like you
Lasse Reichstein 2011/02/04 12:32:13 Done.
91 }
92
93 SafepointTable* table_;
94 // Index of next deoptimization entry. If negative after calling
95 // FindNextIndex, there are no more, and Next will return an invalid
96 // SafepointEntry.
97 int index_;
98 };
99
100
49 void Deoptimizer::DeoptimizeFunction(JSFunction* function) { 101 void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
50 AssertNoAllocation no_allocation; 102 AssertNoAllocation no_allocation;
51 103
52 if (!function->IsOptimized()) return; 104 if (!function->IsOptimized()) return;
53 105
54 // Get the optimized code. 106 // Get the optimized code.
55 Code* code = function->code(); 107 Code* code = function->code();
56 108
57 // Invalidate the relocation information, as it will become invalid by the 109 // Invalidate the relocation information, as it will become invalid by the
58 // code patching below, and is not needed any more. 110 // code patching below, and is not needed any more.
59 code->InvalidateRelocation(); 111 code->InvalidateRelocation();
60 112
 61 // For each return after a safepoint insert an absolute call to the 113 // For each return after a safepoint insert an absolute call to the
 62 // corresponding deoptimization entry. 114 // corresponding deoptimization entry, or a short call to an absolute
63 unsigned last_pc_offset = 0; 115 // jump if space is short. The absolute jumps are put in a table just
116 // before the safepoint table (space was allocated there when the Code
117 // object was created, if necessary).
118 unsigned jump_table_offset = function->code()->safepoint_table_offset();
119 unsigned previous_pc_offset = 0;
64 SafepointTable table(function->code()); 120 SafepointTable table(function->code());
65 for (unsigned i = 0; i < table.length(); i++) { 121 SafepointTableDeoptimiztionEntryIterator deoptimizations(&table);
66 unsigned pc_offset = table.GetPcOffset(i); 122
67 SafepointEntry safepoint_entry = table.GetEntry(i); 123 unsigned entry_pc_offset = 0;
68 int deoptimization_index = safepoint_entry.deoptimization_index(); 124 SafepointEntry current_entry = deoptimizations.Next(&entry_pc_offset);
69 int gap_code_size = safepoint_entry.gap_code_size(); 125
70 #ifdef DEBUG 126 while (current_entry.is_valid()) {
71 // Destroy the code which is not supposed to run again. 127 int gap_code_size = current_entry.gap_code_size();
72 unsigned instructions = pc_offset - last_pc_offset; 128 unsigned deoptimization_index = current_entry.deoptimization_index();
73 CodePatcher destroyer(code->instruction_start() + last_pc_offset, 129
74 instructions); 130 #ifdef DEBUG
Kevin Millikin (Chromium) 2011/02/04 11:34:54 We usually write #ifdef aligned at the left margin
Lasse Reichstein 2011/02/04 12:32:13 Ack. I don't know how this got indented. Probably
75 for (unsigned i = 0; i < instructions; i++) { 131 // Destroy the code which is not supposed to run again.
76 destroyer.masm()->int3(); 132 CHECK(entry_pc_offset >= previous_pc_offset);
133 ZapInstructions(code, previous_pc_offset,
134 entry_pc_offset - previous_pc_offset);
135 #endif
136 // Position where Call will be patched in.
137 unsigned call_offset = entry_pc_offset + gap_code_size;
138 // End of call instruction, if using a direct call to a 64-bit address.
139 unsigned call_end_offset =
140 call_offset + MacroAssembler::kCallInstructionLength;
141
142 // Find next deoptimization entry, if any.
143 unsigned next_pc_offset = 0;
144 SafepointEntry next_entry = deoptimizations.Next(&next_pc_offset);
145
146 if (!next_entry.is_valid() || next_pc_offset >= call_end_offset) {
Kevin Millikin (Chromium) 2011/02/04 11:34:54 If the next entry is not valid, I think you should
Lasse Reichstein 2011/02/04 12:32:13 I pad with space enough for one final call in all
147 // Room enough to write a long call instruction.
148 CodePatcher patcher(code->instruction_start() + call_offset,
149 Assembler::kCallInstructionLength);
150 patcher.masm()->Call(GetDeoptimizationEntry(deoptimization_index, LAZY),
151 RelocInfo::NONE);
Kevin Millikin (Chromium) 2011/02/04 11:34:54 Align with the other argument.
Lasse Reichstein 2011/02/04 12:32:13 Done.
152 previous_pc_offset = call_end_offset;
153 } else {
154 // Not room enough for a long Call instruction. Write a short call
155 // instruction to a long jump placed elsewhere in the code.
156 unsigned short_call_end_offset =
157 call_offset + MacroAssembler::kShortCallInstructionLength;
158 ASSERT(next_pc_offset >= short_call_end_offset);
159
160 // Write jump in jump-table.
161 jump_table_offset -= MacroAssembler::kJumpInstructionLength;
162 CodePatcher jump_patcher(code->instruction_start() + jump_table_offset,
163 MacroAssembler::kJumpInstructionLength);
164 jump_patcher.masm()->Jump(
165 GetDeoptimizationEntry(deoptimization_index, LAZY),
166 RelocInfo::NONE);
167
168 // Write call to jump at call_offset.
169 CodePatcher call_patcher(code->instruction_start() + call_offset,
170 MacroAssembler::kShortCallInstructionLength);
171 call_patcher.masm()->call(code->instruction_start() + jump_table_offset);
172 previous_pc_offset = short_call_end_offset;
77 } 173 }
Lasse Reichstein 2011/02/04 12:32:13 As discussed offline, I also rewrite to use addres
78 #endif 174
79 last_pc_offset = pc_offset; 175 // Continue with next deoptimization entry.
80 if (deoptimization_index != Safepoint::kNoDeoptimizationIndex) { 176 current_entry = next_entry;
81 last_pc_offset += gap_code_size; 177 entry_pc_offset = next_pc_offset;
82 CodePatcher patcher(code->instruction_start() + last_pc_offset,
83 patch_size());
84 patcher.masm()->Call(GetDeoptimizationEntry(deoptimization_index, LAZY),
85 RelocInfo::NONE);
86 last_pc_offset += patch_size();
87 }
88 } 178 }
179
89 #ifdef DEBUG 180 #ifdef DEBUG
90 // Destroy the code which is not supposed to run again. 181 // Destroy the code which is not supposed to run again.
91 CHECK(code->safepoint_table_offset() >= last_pc_offset); 182 CHECK(jump_table_offset >= previous_pc_offset);
92 unsigned instructions = code->safepoint_table_offset() - last_pc_offset; 183 ZapInstructions(code, previous_pc_offset,
93 CodePatcher destroyer(code->instruction_start() + last_pc_offset, 184 jump_table_offset - previous_pc_offset);
94 instructions);
95 for (unsigned i = 0; i < instructions; i++) {
96 destroyer.masm()->int3();
97 }
98 #endif 185 #endif
99 186
100 // Add the deoptimizing code to the list. 187 // Add the deoptimizing code to the list.
101 DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code); 188 DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code);
102 node->set_next(deoptimizing_code_list_); 189 node->set_next(deoptimizing_code_list_);
103 deoptimizing_code_list_ = node; 190 deoptimizing_code_list_ = node;
104 191
105 // Set the code for the function to non-optimized version. 192 // Set the code for the function to non-optimized version.
106 function->ReplaceCode(function->shared()->code()); 193 function->ReplaceCode(function->shared()->code());
107 194
(...skipping 275 matching lines...) Expand 10 before | Expand all | Expand 10 after
383 // Preserve deoptimizer object in register rax and get the input 470 // Preserve deoptimizer object in register rax and get the input
384 // frame descriptor pointer. 471 // frame descriptor pointer.
385 __ movq(rbx, Operand(rax, Deoptimizer::input_offset())); 472 __ movq(rbx, Operand(rax, Deoptimizer::input_offset()));
386 473
387 // Fill in the input registers. 474 // Fill in the input registers.
388 for (int i = kNumberOfRegisters -1; i >= 0; i--) { 475 for (int i = kNumberOfRegisters -1; i >= 0; i--) {
389 int offset = (i * kPointerSize) + FrameDescription::registers_offset(); 476 int offset = (i * kPointerSize) + FrameDescription::registers_offset();
390 __ pop(Operand(rbx, offset)); 477 __ pop(Operand(rbx, offset));
391 } 478 }
392 479
393 // Fill in the double input registers. 480 // Fill in the double input registers.
394 int double_regs_offset = FrameDescription::double_registers_offset(); 481 int double_regs_offset = FrameDescription::double_registers_offset();
395 for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; i++) { 482 for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; i++) {
396 int dst_offset = i * kDoubleSize + double_regs_offset; 483 int dst_offset = i * kDoubleSize + double_regs_offset;
397 __ pop(Operand(rbx, dst_offset)); 484 __ pop(Operand(rbx, dst_offset));
398 } 485 }
399 486
400 // Remove the bailout id from the stack. 487 // Remove the bailout id from the stack.
401 if (type() == EAGER) { 488 if (type() == EAGER) {
402 __ addq(rsp, Immediate(kPointerSize)); 489 __ addq(rsp, Immediate(kPointerSize));
403 } else { 490 } else {
404 __ addq(rsp, Immediate(2 * kPointerSize)); 491 __ addq(rsp, Immediate(2 * kPointerSize));
405 } 492 }
406 493
407 // Compute a pointer to the unwinding limit in register ecx; that is 494 // Compute a pointer to the unwinding limit in register rcx; that is
408 // the first stack slot not part of the input frame. 495 // the first stack slot not part of the input frame.
409 __ movq(rcx, Operand(rbx, FrameDescription::frame_size_offset())); 496 __ movq(rcx, Operand(rbx, FrameDescription::frame_size_offset()));
410 __ addq(rcx, rsp); 497 __ addq(rcx, rsp);
411 498
412 // Unwind the stack down to - but not including - the unwinding 499 // Unwind the stack down to - but not including - the unwinding
413 // limit and copy the contents of the activation frame to the input 500 // limit and copy the contents of the activation frame to the input
414 // frame description. 501 // frame description.
415 __ lea(rdx, Operand(rbx, FrameDescription::frame_content_offset())); 502 __ lea(rdx, Operand(rbx, FrameDescription::frame_content_offset()));
416 Label pop_loop; 503 Label pop_loop;
417 __ bind(&pop_loop); 504 __ bind(&pop_loop);
(...skipping 88 matching lines...) Expand 10 before | Expand all | Expand 10 after
506 } 593 }
507 __ bind(&done); 594 __ bind(&done);
508 } 595 }
509 596
510 #undef __ 597 #undef __
511 598
512 599
513 } } // namespace v8::internal 600 } } // namespace v8::internal
514 601
515 #endif // V8_TARGET_ARCH_X64 602 #endif // V8_TARGET_ARCH_X64
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698