Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 // Copyright 2010 the V8 project authors. All rights reserved. | 1 // Copyright 2010 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 35 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 46 if (!function->IsOptimized()) return; | 46 if (!function->IsOptimized()) return; |
| 47 | 47 |
| 48 // Get the optimized code. | 48 // Get the optimized code. |
| 49 Code* code = function->code(); | 49 Code* code = function->code(); |
| 50 | 50 |
| 51 // Invalidate the relocation information, as it will become invalid by the | 51 // Invalidate the relocation information, as it will become invalid by the |
| 52 // code patching below, and is not needed any more. | 52 // code patching below, and is not needed any more. |
| 53 code->InvalidateRelocation(); | 53 code->InvalidateRelocation(); |
| 54 | 54 |
| 55 // For each return after a safepoint insert a absolute call to the | 55 // For each return after a safepoint insert a absolute call to the |
| 56 // corresponding deoptimization entry. | 56 // corresponding deoptimization entry, or a short call to an absolute |
| 57 // jump if space is short. | |
| 58 unsigned jump_table = function->code()->safepoint_table_start(); | |
| 57 unsigned last_pc_offset = 0; | 59 unsigned last_pc_offset = 0; |
| 58 SafepointTable table(function->code()); | 60 SafepointTable table(function->code()); |
| 59 for (unsigned i = 0; i < table.length(); i++) { | 61 for (unsigned i = 0; i < table.length(); i++) { |
| 60 unsigned pc_offset = table.GetPcOffset(i); | 62 unsigned pc_offset = table.GetPcOffset(i); |
| 61 SafepointEntry safepoint_entry = table.GetEntry(i); | 63 SafepointEntry safepoint_entry = table.GetEntry(i); |
| 62 int deoptimization_index = safepoint_entry.deoptimization_index(); | 64 int deoptimization_index = safepoint_entry.deoptimization_index(); |
| 63 int gap_code_size = safepoint_entry.gap_code_size(); | 65 int gap_code_size = safepoint_entry.gap_code_size(); |
| 64 #ifdef DEBUG | 66 #ifdef DEBUG |
| 65 // Destroy the code which is not supposed to run again. | 67 // Destroy the code which is not supposed to run again. |
| 68 CHECK(pc_offset >= last_pc_offset); | |
| 66 unsigned instructions = pc_offset - last_pc_offset; | 69 unsigned instructions = pc_offset - last_pc_offset; |
| 67 CodePatcher destroyer(code->instruction_start() + last_pc_offset, | 70 CodePatcher destroyer(code->instruction_start() + last_pc_offset, |
| 68 instructions); | 71 instructions); |
| 69 for (unsigned i = 0; i < instructions; i++) { | 72 while (instructions > 0) { |
|
Kevin Millikin (Chromium)
2011/02/02 13:05:33
This whole code hunk in #ifdef DEBUG is repeated below.
Lasse Reichstein
2011/02/03 14:14:12
Moved int3-writing to function called ZapInstructions.
Kevin Millikin (Chromium)
2011/02/04 10:27:29
You can (but I'm not suggesting you do) write it a
| |
| 70 destroyer.masm()->int3(); | 73 destroyer.masm()->int3(); |
| 74 instructions--; | |
| 71 } | 75 } |
| 72 #endif | 76 #endif |
| 73 last_pc_offset = pc_offset; | 77 last_pc_offset = pc_offset; |
| 74 if (deoptimization_index != Safepoint::kNoDeoptimizationIndex) { | 78 if (deoptimization_index != Safepoint::kNoDeoptimizationIndex) { |
| 75 CodePatcher patcher( | 79 int call_length = MacroAssembler::kCallInstructionLength; |
|
Kevin Millikin (Chromium)
2011/02/02 13:05:33
I find all the code below confusing.
You only rea
Lasse Reichstein
2011/02/03 14:14:12
The code is generally confusing. I have refactored
| |
| 76 code->instruction_start() + pc_offset + gap_code_size, | 80 // Check if the next entry (if any) is located so close that |
| 77 Assembler::kCallInstructionLength); | 81 // we can't write a long Call sequence. |
| 78 patcher.masm()->Call(GetDeoptimizationEntry(deoptimization_index, LAZY), | 82 int end_of_long_call = |
| 79 RelocInfo::NONE); | 83 pc_offset + gap_code_size + MacroAssembler::kCallInstructionLength; |
|
Kevin Millikin (Chromium)
2011/02/02 13:05:33
All of the code in this if (deoptimization_index !
| |
| 80 last_pc_offset += gap_code_size + Assembler::kCallInstructionLength; | 84 for (unsigned j = i + 1; j < table.length(); j++) { |
| 85 // Stop if the next entry ends later than the call would. | |
| 86 int next_pc = table.GetPcOffset(j); | |
|
Kevin Millikin (Chromium)
2011/02/02 13:05:33
Call this next_pc_offset or something so it's clear.
Lasse Reichstein
2011/02/03 14:14:12
Done.
| |
| 87 if (next_pc >= end_of_long_call) break; | |
| 88 // Only care if the entry is a deoptimization point. | |
| 89 if (table.GetEntry(j).deoptimization_index() != | |
| 90 Safepoint::kNoDeoptimizationIndex) { | |
| 91 call_length = next_pc - (pc_offset + gap_code_size); | |
|
Kevin Millikin (Chromium)
2011/02/02 13:05:33
fits = false;
break;
| |
| 92 break; | |
| 93 } | |
| 94 } | |
| 95 if (call_length >= MacroAssembler::kCallInstructionLength) { | |
|
Kevin Millikin (Chromium)
2011/02/02 13:05:33
if (fits) ...
| |
| 96 CodePatcher patcher( | |
|
Kevin Millikin (Chromium)
2011/02/02 13:05:33
CodePatcher patcher(code->instruction_start() + la
| |
| 97 code->instruction_start() + pc_offset + gap_code_size, | |
| 98 Assembler::kCallInstructionLength); | |
| 99 patcher.masm()->Call(GetDeoptimizationEntry(deoptimization_index, LAZY), | |
| 100 RelocInfo::NONE); | |
| 101 last_pc_offset += gap_code_size + Assembler::kCallInstructionLength; | |
|
Kevin Millikin (Chromium)
2011/02/02 13:05:33
last_pc_offset += Assembler::kCallInstructionLength;
| |
| 102 } else { | |
| 103 jump_table -= MacroAssembler::kJumpInstructionLength; | |
| 104 CodePatcher jump_patcher(code->instruction_start() + jump_table, | |
|
Kevin Millikin (Chromium)
2011/02/02 13:05:33
It probably bears naming
Address jump_address = c
| |
| 105 MacroAssembler::kJumpInstructionLength); | |
| 106 jump_patcher.masm()->Jump( | |
| 107 GetDeoptimizationEntry(deoptimization_index, LAZY), | |
| 108 RelocInfo::NONE); | |
| 109 | |
| 110 CodePatcher call_patcher( | |
|
Kevin Millikin (Chromium)
2011/02/02 13:05:33
CodePatcher call_patcher(code->instruction_start +
Lasse Reichstein
2011/02/03 14:14:12
Fixed.
| |
| 111 code->instruction_start() + pc_offset + gap_code_size, | |
| 112 call_length); | |
| 113 call_patcher.masm()->call(code->instruction_start() + jump_table); | |
| 114 last_pc_offset += gap_code_size + call_patcher.masm()->pc_offset(); | |
|
Kevin Millikin (Chromium)
2011/02/02 13:05:33
last_pc_offset += Assembler::kShortCallLength;
| |
| 115 } | |
| 81 } | 116 } |
| 82 } | 117 } |
| 83 #ifdef DEBUG | 118 #ifdef DEBUG |
| 84 // Destroy the code which is not supposed to run again. | 119 // Destroy the code which is not supposed to run again. |
| 85 CHECK(code->safepoint_table_start() >= last_pc_offset); | 120 CHECK(jump_table >= last_pc_offset); |
| 86 unsigned instructions = code->safepoint_table_start() - last_pc_offset; | 121 unsigned instructions = jump_table - last_pc_offset; |
| 87 CodePatcher destroyer(code->instruction_start() + last_pc_offset, | 122 CodePatcher destroyer(code->instruction_start() + last_pc_offset, |
| 88 instructions); | 123 instructions); |
| 89 for (unsigned i = 0; i < instructions; i++) { | 124 for (unsigned i = 0; i < instructions; i++) { |
| 90 destroyer.masm()->int3(); | 125 destroyer.masm()->int3(); |
| 91 } | 126 } |
| 92 #endif | 127 #endif |
| 93 | 128 |
| 94 // Add the deoptimizing code to the list. | 129 // Add the deoptimizing code to the list. |
| 95 DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code); | 130 DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code); |
| 96 node->set_next(deoptimizing_code_list_); | 131 node->set_next(deoptimizing_code_list_); |
| (...skipping 280 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 377 // Preserve deoptimizer object in register rax and get the input | 412 // Preserve deoptimizer object in register rax and get the input |
| 378 // frame descriptor pointer. | 413 // frame descriptor pointer. |
| 379 __ movq(rbx, Operand(rax, Deoptimizer::input_offset())); | 414 __ movq(rbx, Operand(rax, Deoptimizer::input_offset())); |
| 380 | 415 |
| 381 // Fill in the input registers. | 416 // Fill in the input registers. |
| 382 for (int i = kNumberOfRegisters -1; i >= 0; i--) { | 417 for (int i = kNumberOfRegisters -1; i >= 0; i--) { |
| 383 int offset = (i * kPointerSize) + FrameDescription::registers_offset(); | 418 int offset = (i * kPointerSize) + FrameDescription::registers_offset(); |
| 384 __ pop(Operand(rbx, offset)); | 419 __ pop(Operand(rbx, offset)); |
| 385 } | 420 } |
| 386 | 421 |
| 387 // Fill in the double input registers. | 422 // Fill in the double input registers. |
| 388 int double_regs_offset = FrameDescription::double_registers_offset(); | 423 int double_regs_offset = FrameDescription::double_registers_offset(); |
| 389 for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; i++) { | 424 for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; i++) { |
| 390 int dst_offset = i * kDoubleSize + double_regs_offset; | 425 int dst_offset = i * kDoubleSize + double_regs_offset; |
| 391 __ pop(Operand(rbx, dst_offset)); | 426 __ pop(Operand(rbx, dst_offset)); |
| 392 } | 427 } |
| 393 | 428 |
| 394 // Remove the bailout id from the stack. | 429 // Remove the bailout id from the stack. |
| 395 if (type() == EAGER) { | 430 if (type() == EAGER) { |
| 396 __ addq(rsp, Immediate(kPointerSize)); | 431 __ addq(rsp, Immediate(kPointerSize)); |
| 397 } else { | 432 } else { |
| 398 __ addq(rsp, Immediate(2 * kPointerSize)); | 433 __ addq(rsp, Immediate(2 * kPointerSize)); |
| 399 } | 434 } |
| 400 | 435 |
| 401 // Compute a pointer to the unwinding limit in register ecx; that is | 436 // Compute a pointer to the unwinding limit in register rcx; that is |
| 402 // the first stack slot not part of the input frame. | 437 // the first stack slot not part of the input frame. |
| 403 __ movq(rcx, Operand(rbx, FrameDescription::frame_size_offset())); | 438 __ movq(rcx, Operand(rbx, FrameDescription::frame_size_offset())); |
| 404 __ addq(rcx, rsp); | 439 __ addq(rcx, rsp); |
| 405 | 440 |
| 406 // Unwind the stack down to - but not including - the unwinding | 441 // Unwind the stack down to - but not including - the unwinding |
| 407 // limit and copy the contents of the activation frame to the input | 442 // limit and copy the contents of the activation frame to the input |
| 408 // frame description. | 443 // frame description. |
| 409 __ lea(rdx, Operand(rbx, FrameDescription::frame_content_offset())); | 444 __ lea(rdx, Operand(rbx, FrameDescription::frame_content_offset())); |
| 410 Label pop_loop; | 445 Label pop_loop; |
| 411 __ bind(&pop_loop); | 446 __ bind(&pop_loop); |
| (...skipping 88 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 500 } | 535 } |
| 501 __ bind(&done); | 536 __ bind(&done); |
| 502 } | 537 } |
| 503 | 538 |
| 504 #undef __ | 539 #undef __ |
| 505 | 540 |
| 506 | 541 |
| 507 } } // namespace v8::internal | 542 } } // namespace v8::internal |
| 508 | 543 |
| 509 #endif // V8_TARGET_ARCH_X64 | 544 #endif // V8_TARGET_ARCH_X64 |
| OLD | NEW |