| OLD | NEW |
| 1 // Copyright 2010 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| 11 // with the distribution. | 11 // with the distribution. |
| 12 // * Neither the name of Google Inc. nor the names of its | 12 // * Neither the name of Google Inc. nor the names of its |
| 13 // contributors may be used to endorse or promote products derived | 13 // contributors may be used to endorse or promote products derived |
| 14 // from this software without specific prior written permission. | 14 // from this software without specific prior written permission. |
| 15 // | 15 // |
| 16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | 16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
| 17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | 17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
| 18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | 18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |
| 19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | 19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT |
| 20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | 20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
| 21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | 21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
| 22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | 22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
| 23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | 23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
| 24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | 24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
| 25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | 25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
| 26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| 27 | 27 |
| 28 #include "v8.h" | 28 #include "v8.h" |
| 29 | 29 |
| 30 #if defined(V8_TARGET_ARCH_X64) |
| 31 |
| 30 #include "codegen.h" | 32 #include "codegen.h" |
| 31 #include "deoptimizer.h" | 33 #include "deoptimizer.h" |
| 32 #include "full-codegen.h" | 34 #include "full-codegen.h" |
| 33 #include "safepoint-table.h" | 35 #include "safepoint-table.h" |
| 34 | 36 |
| 35 namespace v8 { | 37 namespace v8 { |
| 36 namespace internal { | 38 namespace internal { |
| 37 | 39 |
| 38 | 40 |
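| // Each deoptimization table entry is a five-byte push_imm32 of the |
| // entry index followed by a five-byte relative jump to the common |
| // entry code (see TableEntryGenerator::GeneratePrologue below). |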
| 39 int Deoptimizer::table_entry_size_ = 10; | 41 int Deoptimizer::table_entry_size_ = 10; |
| 40 | 42 |
| 43 |
| 44 int Deoptimizer::patch_size() { |
| 45 return MacroAssembler::kCallInstructionLength; |
| 46 } |
| 47 |
| 48 |
| 49 #ifdef DEBUG |
| 50 // Overwrites code with int3 instructions. |
| 51 static void ZapCodeRange(Address from, Address to) { |
| 52 CHECK(from <= to); |
| 53 int length = static_cast<int>(to - from); |
| 54 CodePatcher destroyer(from, length); |
| 55 while (length-- > 0) { |
| 56 destroyer.masm()->int3(); |
| 57 } |
| 58 } |
| 59 #endif |
| 60 |
| 61 |
| 62 // Iterate through the entries of a SafepointTable that correspond to |
| 63 // deoptimization points. |
| 64 class SafepointTableDeoptimizationEntryIterator { |
| 65 public: |
| 66 explicit SafepointTableDeoptimizationEntryIterator(Code* code) |
| 67 : code_(code), table_(code), index_(-1), limit_(table_.length()) { |
| 68 FindNextIndex(); |
| 69 } |
| 70 |
| 71 SafepointEntry Next(Address* pc) { |
| 72 if (index_ >= limit_) { |
| 73 *pc = NULL; |
| 74 return SafepointEntry(); // Invalid entry. |
| 75 } |
| 76 *pc = code_->instruction_start() + table_.GetPcOffset(index_); |
| 77 SafepointEntry entry = table_.GetEntry(index_); |
| 78 FindNextIndex(); |
| 79 return entry; |
| 80 } |
| 81 |
| 82 private: |
| 83 void FindNextIndex() { |
| 84 ASSERT(index_ < limit_); |
| 85 while (++index_ < limit_) { |
| 86 if (table_.GetEntry(index_).deoptimization_index() != |
| 87 Safepoint::kNoDeoptimizationIndex) { |
| 88 return; |
| 89 } |
| 90 } |
| 91 } |
| 92 |
| 93 Code* code_; |
| 94 SafepointTable table_; |
| 95 // Index of the next deoptimization entry. If it has reached limit_ |
| 96 // after calling FindNextIndex, there are no more, and Next will |
| 97 // return an invalid SafepointEntry. |
| 98 int index_; |
| 99 // Table length. |
| 100 int limit_; |
| 101 }; |
| 102 |
| 103 |
| 41 void Deoptimizer::DeoptimizeFunction(JSFunction* function) { | 104 void Deoptimizer::DeoptimizeFunction(JSFunction* function) { |
| 42 UNIMPLEMENTED(); | 105 AssertNoAllocation no_allocation; |
| 43 } | 106 |
| 44 | 107 if (!function->IsOptimized()) return; |
| 45 | 108 |
| 46 void Deoptimizer::PatchStackCheckCode(RelocInfo* rinfo, | 109 // Get the optimized code. |
| 47 Code* replacement_code) { | 110 Code* code = function->code(); |
| 48 UNIMPLEMENTED(); | 111 |
| 49 } | 112 // Invalidate the relocation information, as it will become invalid by the |
| 50 | 113 // code patching below, and is not needed any more. |
| 51 | 114 code->InvalidateRelocation(); |
| 52 void Deoptimizer::RevertStackCheckCode(RelocInfo* rinfo, Code* check_code) { | 116 // For each return after a safepoint, insert an absolute call to the |
| 53 UNIMPLEMENTED(); | 116 // For each return after a safepoint insert a absolute call to the |
| 54 } | 117 // corresponding deoptimization entry, or a short call to an absolute |
| 55 | 118 // jump if space is short. The absolute jumps are put in a table just |
| 56 | 119 // before the safepoint table (space was allocated there when the Code |
| 120 // object was created, if necessary). |
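| // (A call to a 64-bit address loads the address into a scratch register |
| // and calls through it, kCallInstructionLength bytes in all; a short |
| // call is a five-byte rel32 call, kShortCallInstructionLength bytes.) |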
| 121 |
| 122 Address instruction_start = code->instruction_start(); |
| 123 Address jump_table_address = |
| 124 instruction_start + code->safepoint_table_offset(); |
| 125 Address previous_pc = instruction_start; |
| 126 |
| 127 SafepointTableDeoptimizationEntryIterator deoptimizations(code); |
| 128 Address entry_pc = NULL; |
| 129 |
| 130 SafepointEntry current_entry = deoptimizations.Next(&entry_pc); |
| 131 while (current_entry.is_valid()) { |
| 132 int gap_code_size = current_entry.gap_code_size(); |
| 133 unsigned deoptimization_index = current_entry.deoptimization_index(); |
| 134 |
| 135 #ifdef DEBUG |
| 136 // Destroy the code which is not supposed to run again. |
| 137 ZapCodeRange(previous_pc, entry_pc); |
| 138 #endif |
| 139 // Position where Call will be patched in. |
| 140 Address call_address = entry_pc + gap_code_size; |
| 141 // End of call instruction, if using a direct call to a 64-bit address. |
| 142 Address call_end_address = |
| 143 call_address + MacroAssembler::kCallInstructionLength; |
| 144 |
| 145 // Find next deoptimization entry, if any. |
| 146 Address next_pc = NULL; |
| 147 SafepointEntry next_entry = deoptimizations.Next(&next_pc); |
| 148 |
| 149 if (!next_entry.is_valid() || next_pc >= call_end_address) { |
| 150 // Room enough to write a long call instruction. |
| 151 CodePatcher patcher(call_address, MacroAssembler::kCallInstructionLength); |
| 152 patcher.masm()->Call(GetDeoptimizationEntry(deoptimization_index, LAZY), |
| 153 RelocInfo::NONE); |
| 154 previous_pc = call_end_address; |
| 155 } else { |
| 156 // Not enough room for a long call instruction. Write a short call |
| 157 // instruction to a long jump placed elsewhere in the code. |
| 158 Address short_call_end_address = |
| 159 call_address + MacroAssembler::kShortCallInstructionLength; |
| 160 ASSERT(next_pc >= short_call_end_address); |
| 161 |
| 162 // Write jump in jump-table. |
| 163 jump_table_address -= MacroAssembler::kJumpInstructionLength; |
| 164 CodePatcher jump_patcher(jump_table_address, |
| 165 MacroAssembler::kJumpInstructionLength); |
| 166 jump_patcher.masm()->Jump( |
| 167 GetDeoptimizationEntry(deoptimization_index, LAZY), |
| 168 RelocInfo::NONE); |
| 169 |
| 170 // Write a short call to the jump at call_address. |
| 171 CodePatcher call_patcher(call_address, |
| 172 MacroAssembler::kShortCallInstructionLength); |
| 173 call_patcher.masm()->call(jump_table_address); |
| 174 previous_pc = short_call_end_address; |
| 175 } |
| 176 |
| 177 // Continue with next deoptimization entry. |
| 178 current_entry = next_entry; |
| 179 entry_pc = next_pc; |
| 180 } |
| 181 |
| 182 #ifdef DEBUG |
| 183 // Destroy the code which is not supposed to run again. |
| 184 ZapCodeRange(previous_pc, jump_table_address); |
| 185 #endif |
| 186 |
| 187 // Add the deoptimizing code to the list. |
| 188 DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code); |
| 189 node->set_next(deoptimizing_code_list_); |
| 190 deoptimizing_code_list_ = node; |
| 191 |
| 192 // Set the code for the function to non-optimized version. |
| 193 function->ReplaceCode(function->shared()->code()); |
| 194 |
| 195 if (FLAG_trace_deopt) { |
| 196 PrintF("[forced deoptimization: "); |
| 197 function->PrintName(); |
| 198 PrintF(" / %" V8PRIxPTR "]\n", reinterpret_cast<intptr_t>(function)); |
| 199 } |
| 200 } |
| 201 |
| 202 |
| 203 void Deoptimizer::PatchStackCheckCodeAt(Address pc_after, |
| 204 Code* check_code, |
| 205 Code* replacement_code) { |
| 206 Address call_target_address = pc_after - kIntSize; |
| 207 ASSERT(check_code->entry() == |
| 208 Assembler::target_address_at(call_target_address)); |
| 209 // The stack check code matches the pattern: |
| 210 // |
| 211 // cmp rsp, <limit> |
| 212 // jae ok |
| 213 // call <stack guard> |
| 214 // test rax, <loop nesting depth> |
| 215 // ok: ... |
| 216 // |
| 217 // We will patch away the branch so the code is: |
| 218 // |
| 219 // cmp rsp, <limit> ;; Not changed |
| 220 // nop |
| 221 // nop |
| 222 // call <on-stack replacement> |
| 223 // test rax, <loop nesting depth> |
| 224 // ok: |
| 225 // |
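| // The jae branches over the call; its one-byte displacement 0x05 is |
| // exactly the length of the five-byte call instruction it skips. |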
| 226 ASSERT(*(call_target_address - 3) == 0x73 && // jae |
| 227 *(call_target_address - 2) == 0x05 && // offset |
| 228 *(call_target_address - 1) == 0xe8); // call |
| 229 *(call_target_address - 3) = 0x90; // nop |
| 230 *(call_target_address - 2) = 0x90; // nop |
| 231 Assembler::set_target_address_at(call_target_address, |
| 232 replacement_code->entry()); |
| 233 } |
| 234 |
| 235 |
| 236 void Deoptimizer::RevertStackCheckCodeAt(Address pc_after, |
| 237 Code* check_code, |
| 238 Code* replacement_code) { |
| 239 Address call_target_address = pc_after - kIntSize; |
| 240 ASSERT(replacement_code->entry() == |
| 241 Assembler::target_address_at(call_target_address)); |
| 242 // Replace the nops from patching (Deoptimizer::PatchStackCheckCodeAt) to |
| 243 // restore the conditional branch. |
| 244 ASSERT(*(call_target_address - 3) == 0x90 && // nop |
| 245 *(call_target_address - 2) == 0x90 && // nop |
| 246 *(call_target_address - 1) == 0xe8); // call |
| 247 *(call_target_address - 3) = 0x73; // jae |
| 248 *(call_target_address - 2) = 0x05; // offset |
| 249 Assembler::set_target_address_at(call_target_address, |
| 250 check_code->entry()); |
| 251 } |
| 252 |
| 253 |
| 57 void Deoptimizer::DoComputeOsrOutputFrame() { | 254 void Deoptimizer::DoComputeOsrOutputFrame() { |
| 58 UNIMPLEMENTED(); | 255 UNIMPLEMENTED(); |
| 59 } | 256 } |
| 60 | 257 |
| 61 | 258 |
| 62 void Deoptimizer::DoComputeFrame(TranslationIterator* iterator, | 259 void Deoptimizer::DoComputeFrame(TranslationIterator* iterator, |
| 63 int frame_index) { | 260 int frame_index) { |
| 64 UNIMPLEMENTED(); | 261 // Read the ast node id, function, and frame height for this output frame. |
| 262 Translation::Opcode opcode = |
| 263 static_cast<Translation::Opcode>(iterator->Next()); |
| 264 USE(opcode); |
| 265 ASSERT(Translation::FRAME == opcode); |
| 266 int node_id = iterator->Next(); |
| 267 JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next())); |
| 268 unsigned height = iterator->Next(); |
| 269 unsigned height_in_bytes = height * kPointerSize; |
| 270 if (FLAG_trace_deopt) { |
| 271 PrintF(" translating "); |
| 272 function->PrintName(); |
| 273 PrintF(" => node=%d, height=%d\n", node_id, height_in_bytes); |
| 274 } |
| 275 |
| 276 // The 'fixed' part of the frame consists of the incoming parameters and |
| 277 // the part described by JavaScriptFrameConstants. |
| 278 unsigned fixed_frame_size = ComputeFixedSize(function); |
| 279 unsigned input_frame_size = static_cast<unsigned>(input_->GetFrameSize()); |
| 280 unsigned output_frame_size = height_in_bytes + fixed_frame_size; |
| 281 |
| 282 // Allocate and store the output frame description. |
| 283 FrameDescription* output_frame = |
| 284 new(output_frame_size) FrameDescription(output_frame_size, function); |
| 285 |
| 286 bool is_bottommost = (0 == frame_index); |
| 287 bool is_topmost = (output_count_ - 1 == frame_index); |
| 288 ASSERT(frame_index >= 0 && frame_index < output_count_); |
| 289 ASSERT(output_[frame_index] == NULL); |
| 290 output_[frame_index] = output_frame; |
| 291 |
| 292 // The top address for the bottommost output frame can be computed from |
| 293 // the input frame pointer and the output frame's height. For all |
| 294 // subsequent output frames, it can be computed from the previous one's |
| 295 // top address and the current frame's size. |
| 296 intptr_t top_address; |
| 297 if (is_bottommost) { |
| 298 // 2 = context and function in the frame. |
| 299 top_address = |
| 300 input_->GetRegister(rbp.code()) - (2 * kPointerSize) - height_in_bytes; |
| 301 } else { |
| 302 top_address = output_[frame_index - 1]->GetTop() - output_frame_size; |
| 303 } |
| 304 output_frame->SetTop(top_address); |
| 305 |
| 306 // Compute the incoming parameter translation (including the receiver). |
| 307 int parameter_count = function->shared()->formal_parameter_count() + 1; |
| 308 unsigned output_offset = output_frame_size; |
| 309 unsigned input_offset = input_frame_size; |
| 310 for (int i = 0; i < parameter_count; ++i) { |
| 311 output_offset -= kPointerSize; |
| 312 DoTranslateCommand(iterator, frame_index, output_offset); |
| 313 } |
| 314 input_offset -= (parameter_count * kPointerSize); |
| 315 |
| 316 // There are no translation commands for the caller's pc and fp, the |
| 317 // context, and the function. Synthesize their values and set them up |
| 318 // explicitly. |
| 319 // |
| 320 // The caller's pc for the bottommost output frame is the same as in the |
| 321 // input frame. For all subsequent output frames, it can be read from the |
| 322 // previous one. This frame's pc can be computed from the non-optimized |
| 323 // function code and AST id of the bailout. |
| 324 output_offset -= kPointerSize; |
| 325 input_offset -= kPointerSize; |
| 326 intptr_t value; |
| 327 if (is_bottommost) { |
| 328 value = input_->GetFrameSlot(input_offset); |
| 329 } else { |
| 330 value = output_[frame_index - 1]->GetPc(); |
| 331 } |
| 332 output_frame->SetFrameSlot(output_offset, value); |
| 333 if (FLAG_trace_deopt) { |
| 334 PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" |
| 335 V8PRIxPTR " ; caller's pc\n", |
| 336 top_address + output_offset, output_offset, value); |
| 337 } |
| 338 |
| 339 // The caller's frame pointer for the bottommost output frame is the same |
| 340 // as in the input frame. For all subsequent output frames, it can be |
| 341 // read from the previous one. Also compute and set this frame's frame |
| 342 // pointer. |
| 343 output_offset -= kPointerSize; |
| 344 input_offset -= kPointerSize; |
| 345 if (is_bottommost) { |
| 346 value = input_->GetFrameSlot(input_offset); |
| 347 } else { |
| 348 value = output_[frame_index - 1]->GetFp(); |
| 349 } |
| 350 output_frame->SetFrameSlot(output_offset, value); |
| 351 intptr_t fp_value = top_address + output_offset; |
| 352 ASSERT(!is_bottommost || input_->GetRegister(rbp.code()) == fp_value); |
| 353 output_frame->SetFp(fp_value); |
| 354 if (is_topmost) output_frame->SetRegister(rbp.code(), fp_value); |
| 355 if (FLAG_trace_deopt) { |
| 356 PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" |
| 357 V8PRIxPTR " ; caller's fp\n", |
| 358 fp_value, output_offset, value); |
| 359 } |
| 360 |
| 361 // The context can be obtained from the function as long as we don't |
| 362 // optimize functions that need local contexts. |
| 363 output_offset -= kPointerSize; |
| 364 input_offset -= kPointerSize; |
| 365 value = reinterpret_cast<intptr_t>(function->context()); |
| 366 // The context for the bottommost output frame should also agree with the |
| 367 // input frame. |
| 368 ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value); |
| 369 output_frame->SetFrameSlot(output_offset, value); |
| 370 if (is_topmost) output_frame->SetRegister(rsi.code(), value); |
| 371 if (FLAG_trace_deopt) { |
| 372 PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" |
| 373 V8PRIxPTR "; context\n", |
| 374 top_address + output_offset, output_offset, value); |
| 375 } |
| 376 |
| 377 // The function was mentioned explicitly in the FRAME translation command. |
| 378 output_offset -= kPointerSize; |
| 379 input_offset -= kPointerSize; |
| 380 value = reinterpret_cast<intptr_t>(function); |
| 381 // The function for the bottommost output frame should also agree with the |
| 382 // input frame. |
| 383 ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value); |
| 384 output_frame->SetFrameSlot(output_offset, value); |
| 385 if (FLAG_trace_deopt) { |
| 386 PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" |
| 387 V8PRIxPTR "; function\n", |
| 388 top_address + output_offset, output_offset, value); |
| 389 } |
| 390 |
| 391 // Translate the rest of the frame. |
| 392 for (unsigned i = 0; i < height; ++i) { |
| 393 output_offset -= kPointerSize; |
| 394 DoTranslateCommand(iterator, frame_index, output_offset); |
| 395 } |
| 396 ASSERT(0 == output_offset); |
| 397 |
| 398 // Compute this frame's PC, state, and continuation. |
| 399 Code* non_optimized_code = function->shared()->code(); |
| 400 FixedArray* raw_data = non_optimized_code->deoptimization_data(); |
| 401 DeoptimizationOutputData* data = DeoptimizationOutputData::cast(raw_data); |
| 402 Address start = non_optimized_code->instruction_start(); |
| 403 unsigned pc_and_state = GetOutputInfo(data, node_id, function->shared()); |
| 404 unsigned pc_offset = FullCodeGenerator::PcField::decode(pc_and_state); |
| 405 intptr_t pc_value = reinterpret_cast<intptr_t>(start + pc_offset); |
| 406 output_frame->SetPc(pc_value); |
| 407 |
| 408 FullCodeGenerator::State state = |
| 409 FullCodeGenerator::StateField::decode(pc_and_state); |
| 410 output_frame->SetState(Smi::FromInt(state)); |
| 411 |
| 412 // Set the continuation for the topmost frame. |
| 413 if (is_topmost) { |
| 414 Code* continuation = (bailout_type_ == EAGER) |
| 415 ? Builtins::builtin(Builtins::NotifyDeoptimized) |
| 416 : Builtins::builtin(Builtins::NotifyLazyDeoptimized); |
| 417 output_frame->SetContinuation( |
| 418 reinterpret_cast<intptr_t>(continuation->entry())); |
| 419 } |
| 420 |
| 421 if (output_count_ - 1 == frame_index) iterator->Done(); |
| 65 } | 422 } |
| 66 | 423 |
| 67 | 424 |
| 425 #define __ masm()-> |
| 426 |
| 68 void Deoptimizer::EntryGenerator::Generate() { | 427 void Deoptimizer::EntryGenerator::Generate() { |
| 69 UNIMPLEMENTED(); | 428 GeneratePrologue(); |
| 429 CpuFeatures::Scope scope(SSE2); |
| 430 |
| 431 // Save all general purpose registers before messing with them. |
| 432 const int kNumberOfRegisters = Register::kNumRegisters; |
| 433 |
| 434 const int kDoubleRegsSize = kDoubleSize * |
| 435 XMMRegister::kNumAllocatableRegisters; |
| 436 __ subq(rsp, Immediate(kDoubleRegsSize)); |
| 437 |
| 438 for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) { |
| 439 XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i); |
| 440 int offset = i * kDoubleSize; |
| 441 __ movsd(Operand(rsp, offset), xmm_reg); |
| 442 } |
| 443 |
| 444 // We push all registers onto the stack, even though we do not need |
| 445 // to restore all of them later. |
| 446 for (int i = 0; i < kNumberOfRegisters; i++) { |
| 447 Register r = Register::toRegister(i); |
| 448 __ push(r); |
| 449 } |
| 450 |
| 451 const int kSavedRegistersAreaSize = kNumberOfRegisters * kPointerSize + |
| 452 kDoubleRegsSize; |
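| // Above the saved registers the stack contains the bailout id pushed |
| // by the deoptimization entry and, for lazy deoptimization, the |
| // return address of the patched call. |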
| 453 |
| 454 // When calling new_deoptimizer_function we need to pass the last argument |
| 455 // on the stack on Windows and in r8 on Linux. The remaining arguments are |
| 456 // all passed in registers (different ones on Linux and Windows, though). |
| 457 |
| 458 #ifdef _WIN64 |
| 459 Register arg4 = r9; |
| 460 Register arg3 = r8; |
| 461 Register arg2 = rdx; |
| 462 Register arg1 = rcx; |
| 463 #else |
| 464 Register arg4 = rcx; |
| 465 Register arg3 = rdx; |
| 466 Register arg2 = rsi; |
| 467 Register arg1 = rdi; |
| 468 #endif |
| 469 |
| 470 // We use this to keep the value of the fifth argument temporarily. |
| 471 // Unfortunately we can't store it directly in r8 (used for passing |
| 472 // it on Linux), since that is another parameter-passing register on Windows. |
| 473 Register arg5 = r11; |
| 474 |
| 475 // Get the bailout id from the stack. |
| 476 __ movq(arg3, Operand(rsp, kSavedRegistersAreaSize)); |
| 477 |
| 478 // Get the address of the location in the code object if possible |
| 479 // and compute the fp-to-sp delta in register arg5. |
| 480 if (type() == EAGER) { |
| 481 __ Set(arg4, 0); |
| 482 __ lea(arg5, Operand(rsp, kSavedRegistersAreaSize + 1 * kPointerSize)); |
| 483 } else { |
| 484 __ movq(arg4, Operand(rsp, kSavedRegistersAreaSize + 1 * kPointerSize)); |
| 485 __ lea(arg5, Operand(rsp, kSavedRegistersAreaSize + 2 * kPointerSize)); |
| 486 } |
| 487 |
| 488 __ subq(arg5, rbp); |
| 489 __ neg(arg5); |
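| // arg5 now holds rbp minus the stack pointer at the point of |
| // deoptimization, i.e. the fp-to-sp delta. |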
| 490 |
| 491 // Allocate a new deoptimizer object. |
| 492 __ PrepareCallCFunction(5); |
| 493 __ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset)); |
| 494 __ movq(arg1, rax); |
| 495 __ movq(arg2, Immediate(type())); |
| 496 // Args 3 and 4 are already in the right registers. |
| 497 |
| 498 // On Windows put the argument on the stack (PrepareCallCFunction has |
| 499 // created space for this). On Linux pass the argument in r8. |
| 500 #ifdef _WIN64 |
| 501 __ movq(Operand(rsp, 0 * kPointerSize), arg5); |
| 502 #else |
| 503 __ movq(r8, arg5); |
| 504 #endif |
| 505 |
| 506 __ CallCFunction(ExternalReference::new_deoptimizer_function(), 5); |
| 507 // Preserve deoptimizer object in register rax and get the input |
| 508 // frame descriptor pointer. |
| 509 __ movq(rbx, Operand(rax, Deoptimizer::input_offset())); |
| 510 |
| 511 // Fill in the input registers. |
| 512 for (int i = kNumberOfRegisters - 1; i >= 0; i--) { |
| 513 int offset = (i * kPointerSize) + FrameDescription::registers_offset(); |
| 514 __ pop(Operand(rbx, offset)); |
| 515 } |
| 516 |
| 517 // Fill in the double input registers. |
| 518 int double_regs_offset = FrameDescription::double_registers_offset(); |
| 519 for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; i++) { |
| 520 int dst_offset = i * kDoubleSize + double_regs_offset; |
| 521 __ pop(Operand(rbx, dst_offset)); |
| 522 } |
| 523 |
| 524 // Remove the bailout id from the stack. |
| 525 if (type() == EAGER) { |
| 526 __ addq(rsp, Immediate(kPointerSize)); |
| 527 } else { |
| 528 __ addq(rsp, Immediate(2 * kPointerSize)); |
| 529 } |
| 530 |
| 531 // Compute a pointer to the unwinding limit in register rcx; that is |
| 532 // the first stack slot not part of the input frame. |
| 533 __ movq(rcx, Operand(rbx, FrameDescription::frame_size_offset())); |
| 534 __ addq(rcx, rsp); |
| 535 |
| 536 // Unwind the stack down to - but not including - the unwinding |
| 537 // limit and copy the contents of the activation frame to the input |
| 538 // frame description. |
| 539 __ lea(rdx, Operand(rbx, FrameDescription::frame_content_offset())); |
| 540 Label pop_loop; |
| 541 __ bind(&pop_loop); |
| 542 __ pop(Operand(rdx, 0)); |
| 543 __ addq(rdx, Immediate(sizeof(intptr_t))); |
| 544 __ cmpq(rcx, rsp); |
| 545 __ j(not_equal, &pop_loop); |
| 546 |
| 547 // Compute the output frame in the deoptimizer. |
| 548 __ push(rax); |
| 549 __ PrepareCallCFunction(1); |
| 550 __ movq(arg1, rax); |
| 551 __ CallCFunction(ExternalReference::compute_output_frames_function(), 1); |
| 552 __ pop(rax); |
| 553 |
| 554 // Replace the current frame with the output frames. |
| 555 Label outer_push_loop, inner_push_loop; |
| 556 // Outer loop state: rax = current FrameDescription**, rdx = one past the |
| 557 // last FrameDescription**. |
| 558 __ movl(rdx, Operand(rax, Deoptimizer::output_count_offset())); |
| 559 __ movq(rax, Operand(rax, Deoptimizer::output_offset())); |
| 560 __ lea(rdx, Operand(rax, rdx, times_8, 0)); |
| 561 __ bind(&outer_push_loop); |
| 562 // Inner loop state: rbx = current FrameDescription*, rcx = loop index. |
| 563 __ movq(rbx, Operand(rax, 0)); |
| 564 __ movq(rcx, Operand(rbx, FrameDescription::frame_size_offset())); |
| 565 __ bind(&inner_push_loop); |
| 566 __ subq(rcx, Immediate(sizeof(intptr_t))); |
| 567 __ push(Operand(rbx, rcx, times_1, FrameDescription::frame_content_offset())); |
| 568 __ testq(rcx, rcx); |
| 569 __ j(not_zero, &inner_push_loop); |
| 570 __ addq(rax, Immediate(kPointerSize)); |
| 571 __ cmpq(rax, rdx); |
| 572 __ j(below, &outer_push_loop); |
| 573 |
| 574 // In case of OSR, we have to restore the XMM registers. |
| 575 if (type() == OSR) { |
| 576 for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) { |
| 577 XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i); |
| 578 int src_offset = i * kDoubleSize + double_regs_offset; |
| 579 __ movsd(xmm_reg, Operand(rbx, src_offset)); |
| 580 } |
| 581 } |
| 582 |
| 583 // Push state, pc, and continuation from the last output frame. |
| 584 if (type() != OSR) { |
| 585 __ push(Operand(rbx, FrameDescription::state_offset())); |
| 586 } |
| 587 __ push(Operand(rbx, FrameDescription::pc_offset())); |
| 588 __ push(Operand(rbx, FrameDescription::continuation_offset())); |
| 589 |
| 590 // Push the registers from the last output frame. |
| 591 for (int i = 0; i < kNumberOfRegisters; i++) { |
| 592 int offset = (i * kPointerSize) + FrameDescription::registers_offset(); |
| 593 __ push(Operand(rbx, offset)); |
| 594 } |
| 595 |
| 596 // Restore the registers from the stack. |
| 597 for (int i = kNumberOfRegisters - 1; i >= 0; i--) { |
| 598 Register r = Register::toRegister(i); |
| 599 // Do not restore rsp, simply pop the value into the next register |
| 600 // and overwrite this afterwards. |
| 601 if (r.is(rsp)) { |
| 602 ASSERT(i > 0); |
| 603 r = Register::toRegister(i - 1); |
| 604 } |
| 605 __ pop(r); |
| 606 } |
| 607 |
| 608 // Set up the roots register. |
| 609 ExternalReference roots_address = ExternalReference::roots_address(); |
| 610 __ movq(r13, roots_address); |
| 611 |
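| // Reload the smi constant register with its canonical value. |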
| 612 __ movq(kSmiConstantRegister, |
| 613 reinterpret_cast<uint64_t>(Smi::FromInt(kSmiConstantRegisterValue)), |
| 614 RelocInfo::NONE); |
| 615 |
| 616 // Return to the continuation point. |
| 617 __ ret(0); |
| 70 } | 618 } |
| 71 | 619 |
| 72 | 620 |
| 73 void Deoptimizer::TableEntryGenerator::GeneratePrologue() { | 621 void Deoptimizer::TableEntryGenerator::GeneratePrologue() { |
| 74 UNIMPLEMENTED(); | 622 // Create a sequence of deoptimization entries. |
| 623 Label done; |
| 624 for (int i = 0; i < count(); i++) { |
| 625 int start = masm()->pc_offset(); |
| 626 USE(start); |
| 627 __ push_imm32(i); |
| 628 __ jmp(&done); |
| 629 ASSERT(masm()->pc_offset() - start == table_entry_size_); |
| 630 } |
| 631 __ bind(&done); |
| 75 } | 632 } |
| 76 | 633 |
| 634 #undef __ |
| 635 |
| 636 |
| 77 } } // namespace v8::internal | 637 } } // namespace v8::internal |
| 638 |
| 639 #endif // V8_TARGET_ARCH_X64 |