Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(7)

Side by Side Diff: src/x64/deoptimizer-x64.cc

Issue 6606006: [Isolates] Merge 6500:6700 from bleeding_edge to isolates. (Closed) Base URL: http://v8.googlecode.com/svn/branches/experimental/isolates/
Patch Set: '' Created 9 years, 9 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « src/x64/cpu-x64.cc ('k') | src/x64/disasm-x64.cc » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright 2010 the V8 project authors. All rights reserved. 1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution. 11 // with the distribution.
(...skipping 21 matching lines...) Expand all
33 #include "deoptimizer.h" 33 #include "deoptimizer.h"
34 #include "full-codegen.h" 34 #include "full-codegen.h"
35 #include "safepoint-table.h" 35 #include "safepoint-table.h"
36 36
37 namespace v8 { 37 namespace v8 {
38 namespace internal { 38 namespace internal {
39 39
40 40
41 int Deoptimizer::table_entry_size_ = 10; 41 int Deoptimizer::table_entry_size_ = 10;
42 42
43
44 int Deoptimizer::patch_size() {
45 return MacroAssembler::kCallInstructionLength;
46 }
47
48
#ifdef DEBUG
// Overwrites the half-open code range [from, to) with int3 (breakpoint)
// instructions so that any stray execution of zapped code traps at once.
static void ZapCodeRange(Address from, Address to) {
  CHECK(from <= to);
  int byte_count = static_cast<int>(to - from);
  CodePatcher destroyer(from, byte_count);
  for (int i = 0; i < byte_count; i++) {
    destroyer.masm()->int3();
  }
}
#endif
60
61
62 // Iterate through the entries of a SafepointTable that corresponds to
63 // deoptimization points.
64 class SafepointTableDeoptimiztionEntryIterator {
65 public:
66 explicit SafepointTableDeoptimiztionEntryIterator(Code* code)
67 : code_(code), table_(code), index_(-1), limit_(table_.length()) {
68 FindNextIndex();
69 }
70
71 SafepointEntry Next(Address* pc) {
72 if (index_ >= limit_) {
73 *pc = NULL;
74 return SafepointEntry(); // Invalid entry.
75 }
76 *pc = code_->instruction_start() + table_.GetPcOffset(index_);
77 SafepointEntry entry = table_.GetEntry(index_);
78 FindNextIndex();
79 return entry;
80 }
81
82 private:
83 void FindNextIndex() {
84 ASSERT(index_ < limit_);
85 while (++index_ < limit_) {
86 if (table_.GetEntry(index_).deoptimization_index() !=
87 Safepoint::kNoDeoptimizationIndex) {
88 return;
89 }
90 }
91 }
92
93 Code* code_;
94 SafepointTable table_;
95 // Index of next deoptimization entry. If negative after calling
96 // FindNextIndex, there are no more, and Next will return an invalid
97 // SafepointEntry.
98 int index_;
99 // Table length.
100 int limit_;
101 };
102
103
43 void Deoptimizer::DeoptimizeFunction(JSFunction* function) { 104 void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
44 AssertNoAllocation no_allocation; 105 AssertNoAllocation no_allocation;
45 106
46 if (!function->IsOptimized()) return; 107 if (!function->IsOptimized()) return;
47 108
48 // Get the optimized code. 109 // Get the optimized code.
49 Code* code = function->code(); 110 Code* code = function->code();
50 111
51 // Invalidate the relocation information, as it will become invalid by the 112 // Invalidate the relocation information, as it will become invalid by the
52 // code patching below, and is not needed any more. 113 // code patching below, and is not needed any more.
53 code->InvalidateRelocation(); 114 code->InvalidateRelocation();
54 115
55 // For each return after a safepoint insert a absolute call to the 116 // For each return after a safepoint insert a absolute call to the
56 // corresponding deoptimization entry. 117 // corresponding deoptimization entry, or a short call to an absolute
57 unsigned last_pc_offset = 0; 118 // jump if space is short. The absolute jumps are put in a table just
58 SafepointTable table(function->code()); 119 // before the safepoint table (space was allocated there when the Code
59 for (unsigned i = 0; i < table.length(); i++) { 120 // object was created, if necessary).
60 unsigned pc_offset = table.GetPcOffset(i); 121
61 SafepointEntry safepoint_entry = table.GetEntry(i); 122 Address instruction_start = function->code()->instruction_start();
62 int deoptimization_index = safepoint_entry.deoptimization_index(); 123 Address jump_table_address =
63 int gap_code_size = safepoint_entry.gap_code_size(); 124 instruction_start + function->code()->safepoint_table_offset();
125 Address previous_pc = instruction_start;
126
127 SafepointTableDeoptimiztionEntryIterator deoptimizations(function->code());
128 Address entry_pc = NULL;
129
130 SafepointEntry current_entry = deoptimizations.Next(&entry_pc);
131 while (current_entry.is_valid()) {
132 int gap_code_size = current_entry.gap_code_size();
133 unsigned deoptimization_index = current_entry.deoptimization_index();
134
64 #ifdef DEBUG 135 #ifdef DEBUG
65 // Destroy the code which is not supposed to run again. 136 // Destroy the code which is not supposed to run again.
66 unsigned instructions = pc_offset - last_pc_offset; 137 ZapCodeRange(previous_pc, entry_pc);
67 CodePatcher destroyer(code->instruction_start() + last_pc_offset,
68 instructions);
69 for (unsigned i = 0; i < instructions; i++) {
70 destroyer.masm()->int3();
71 }
72 #endif 138 #endif
73 last_pc_offset = pc_offset; 139 // Position where Call will be patched in.
74 if (deoptimization_index != Safepoint::kNoDeoptimizationIndex) { 140 Address call_address = entry_pc + gap_code_size;
75 CodePatcher patcher( 141 // End of call instruction, if using a direct call to a 64-bit address.
76 code->instruction_start() + pc_offset + gap_code_size, 142 Address call_end_address =
77 Assembler::kCallInstructionLength); 143 call_address + MacroAssembler::kCallInstructionLength;
144
145 // Find next deoptimization entry, if any.
146 Address next_pc = NULL;
147 SafepointEntry next_entry = deoptimizations.Next(&next_pc);
148
149 if (!next_entry.is_valid() || next_pc >= call_end_address) {
150 // Room enough to write a long call instruction.
151 CodePatcher patcher(call_address, Assembler::kCallInstructionLength);
78 patcher.masm()->Call(GetDeoptimizationEntry(deoptimization_index, LAZY), 152 patcher.masm()->Call(GetDeoptimizationEntry(deoptimization_index, LAZY),
79 RelocInfo::NONE); 153 RelocInfo::NONE);
80 last_pc_offset += gap_code_size + Assembler::kCallInstructionLength; 154 previous_pc = call_end_address;
155 } else {
156 // Not room enough for a long Call instruction. Write a short call
157 // instruction to a long jump placed elsewhere in the code.
158 Address short_call_end_address =
159 call_address + MacroAssembler::kShortCallInstructionLength;
160 ASSERT(next_pc >= short_call_end_address);
161
162 // Write jump in jump-table.
163 jump_table_address -= MacroAssembler::kJumpInstructionLength;
164 CodePatcher jump_patcher(jump_table_address,
165 MacroAssembler::kJumpInstructionLength);
166 jump_patcher.masm()->Jump(
167 GetDeoptimizationEntry(deoptimization_index, LAZY),
168 RelocInfo::NONE);
169
170 // Write call to jump at call_offset.
171 CodePatcher call_patcher(call_address,
172 MacroAssembler::kShortCallInstructionLength);
173 call_patcher.masm()->call(jump_table_address);
174 previous_pc = short_call_end_address;
81 } 175 }
176
177 // Continue with next deoptimization entry.
178 current_entry = next_entry;
179 entry_pc = next_pc;
82 } 180 }
181
83 #ifdef DEBUG 182 #ifdef DEBUG
84 // Destroy the code which is not supposed to run again. 183 // Destroy the code which is not supposed to run again.
85 unsigned instructions = code->safepoint_table_start() - last_pc_offset; 184 ZapCodeRange(previous_pc, jump_table_address);
86 CodePatcher destroyer(code->instruction_start() + last_pc_offset,
87 instructions);
88 for (unsigned i = 0; i < instructions; i++) {
89 destroyer.masm()->int3();
90 }
91 #endif 185 #endif
92 186
93 // Add the deoptimizing code to the list. 187 // Add the deoptimizing code to the list.
94 DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code); 188 DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code);
95 DeoptimizerData* data = Isolate::Current()->deoptimizer_data(); 189 DeoptimizerData* data = Isolate::Current()->deoptimizer_data();
96 node->set_next(data->deoptimizing_code_list_); 190 node->set_next(data->deoptimizing_code_list_);
97 data->deoptimizing_code_list_ = node; 191 data->deoptimizing_code_list_ = node;
98 192
99 // Set the code for the function to non-optimized version. 193 // Set the code for the function to non-optimized version.
100 function->ReplaceCode(function->shared()->code()); 194 function->ReplaceCode(function->shared()->code());
101 195
102 if (FLAG_trace_deopt) { 196 if (FLAG_trace_deopt) {
103 PrintF("[forced deoptimization: "); 197 PrintF("[forced deoptimization: ");
104 function->PrintName(); 198 function->PrintName();
105 PrintF(" / %" V8PRIxPTR "]\n", reinterpret_cast<intptr_t>(function)); 199 PrintF(" / %" V8PRIxPTR "]\n", reinterpret_cast<intptr_t>(function));
106 } 200 }
107 } 201 }
108 202
109 203
110 void Deoptimizer::PatchStackCheckCode(Code* unoptimized_code, 204 void Deoptimizer::PatchStackCheckCodeAt(Address pc_after,
111 Code* check_code, 205 Code* check_code,
112 Code* replacement_code) { 206 Code* replacement_code) {
113 UNIMPLEMENTED(); 207 UNIMPLEMENTED();
114 } 208 }
115 209
116 210
117 void Deoptimizer::RevertStackCheckCode(Code* unoptimized_code, 211 void Deoptimizer::RevertStackCheckCodeAt(Address pc_after,
118 Code* check_code, 212 Code* check_code,
119 Code* replacement_code) { 213 Code* replacement_code) {
120 UNIMPLEMENTED(); 214 UNIMPLEMENTED();
121 } 215 }
122 216
123 217
124 void Deoptimizer::DoComputeOsrOutputFrame() { 218 void Deoptimizer::DoComputeOsrOutputFrame() {
125 UNIMPLEMENTED(); 219 UNIMPLEMENTED();
126 } 220 }
127 221
128 222
129 void Deoptimizer::DoComputeFrame(TranslationIterator* iterator, 223 void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
(...skipping 248 matching lines...) Expand 10 before | Expand all | Expand 10 after
378 // Preserve deoptimizer object in register rax and get the input 472 // Preserve deoptimizer object in register rax and get the input
379 // frame descriptor pointer. 473 // frame descriptor pointer.
380 __ movq(rbx, Operand(rax, Deoptimizer::input_offset())); 474 __ movq(rbx, Operand(rax, Deoptimizer::input_offset()));
381 475
382 // Fill in the input registers. 476 // Fill in the input registers.
383 for (int i = kNumberOfRegisters -1; i >= 0; i--) { 477 for (int i = kNumberOfRegisters -1; i >= 0; i--) {
384 int offset = (i * kPointerSize) + FrameDescription::registers_offset(); 478 int offset = (i * kPointerSize) + FrameDescription::registers_offset();
385 __ pop(Operand(rbx, offset)); 479 __ pop(Operand(rbx, offset));
386 } 480 }
387 481
388 // Fill in the double input registers. 482 // Fill in the double input registers.
389 int double_regs_offset = FrameDescription::double_registers_offset(); 483 int double_regs_offset = FrameDescription::double_registers_offset();
390 for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; i++) { 484 for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; i++) {
391 int dst_offset = i * kDoubleSize + double_regs_offset; 485 int dst_offset = i * kDoubleSize + double_regs_offset;
392 __ pop(Operand(rbx, dst_offset)); 486 __ pop(Operand(rbx, dst_offset));
393 } 487 }
394 488
395 // Remove the bailout id from the stack. 489 // Remove the bailout id from the stack.
396 if (type() == EAGER) { 490 if (type() == EAGER) {
397 __ addq(rsp, Immediate(kPointerSize)); 491 __ addq(rsp, Immediate(kPointerSize));
398 } else { 492 } else {
399 __ addq(rsp, Immediate(2 * kPointerSize)); 493 __ addq(rsp, Immediate(2 * kPointerSize));
400 } 494 }
401 495
402 // Compute a pointer to the unwinding limit in register ecx; that is 496 // Compute a pointer to the unwinding limit in register rcx; that is
403 // the first stack slot not part of the input frame. 497 // the first stack slot not part of the input frame.
404 __ movq(rcx, Operand(rbx, FrameDescription::frame_size_offset())); 498 __ movq(rcx, Operand(rbx, FrameDescription::frame_size_offset()));
405 __ addq(rcx, rsp); 499 __ addq(rcx, rsp);
406 500
407 // Unwind the stack down to - but not including - the unwinding 501 // Unwind the stack down to - but not including - the unwinding
408 // limit and copy the contents of the activation frame to the input 502 // limit and copy the contents of the activation frame to the input
409 // frame description. 503 // frame description.
410 __ lea(rdx, Operand(rbx, FrameDescription::frame_content_offset())); 504 __ lea(rdx, Operand(rbx, FrameDescription::frame_content_offset()));
411 Label pop_loop; 505 Label pop_loop;
412 __ bind(&pop_loop); 506 __ bind(&pop_loop);
(...skipping 88 matching lines...) Expand 10 before | Expand all | Expand 10 after
501 } 595 }
502 __ bind(&done); 596 __ bind(&done);
503 } 597 }
504 598
505 #undef __ 599 #undef __
506 600
507 601
508 } } // namespace v8::internal 602 } } // namespace v8::internal
509 603
510 #endif // V8_TARGET_ARCH_X64 604 #endif // V8_TARGET_ARCH_X64
OLDNEW
« no previous file with comments | « src/x64/cpu-x64.cc ('k') | src/x64/disasm-x64.cc » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698