| OLD | NEW |
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 26 matching lines...) |
| 37 | 37 |
| 38 const int Deoptimizer::table_entry_size_ = 16; | 38 const int Deoptimizer::table_entry_size_ = 16; |
| 39 | 39 |
| 40 | 40 |
| 41 int Deoptimizer::patch_size() { | 41 int Deoptimizer::patch_size() { |
| 42 const int kCallInstructionSizeInWords = 3; | 42 const int kCallInstructionSizeInWords = 3; |
| 43 return kCallInstructionSizeInWords * Assembler::kInstrSize; | 43 return kCallInstructionSizeInWords * Assembler::kInstrSize; |
| 44 } | 44 } |
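A note on the arithmetic above: on ARM every instruction is 4 bytes (Assembler::kInstrSize), so patch_size() reserves 3 * 4 = 12 bytes, enough for the longest call sequence the macro assembler may emit for a far call (for example, loading the target address into a scratch register and then issuing blx). A minimal standalone sketch of the size checks this budget supports; the constants here are illustrative, not taken from V8 headers:

    // Standalone illustration (not V8 code): a patch site must hold the
    // whole call sequence, whatever encoding the assembler chooses.
    #include <cassert>

    int main() {
      const int kInstrSize = 4;                   // ARM: fixed 4-byte instructions
      const int kCallInstructionSizeInWords = 3;  // worst-case call sequence
      const int patch_size = kCallInstructionSizeInWords * kInstrSize;  // 12 bytes

      int call_size_in_bytes = 8;  // e.g. "ldr ip, [pc, #off]; blx ip"
      assert(call_size_in_bytes % kInstrSize == 0);  // whole instructions only
      assert(call_size_in_bytes <= patch_size);      // fits the reserved site
      return 0;
    }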
| 45 | 45 |
| 46 | 46 |
| 47 void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) { | |
| 48 // Nothing to do. No new relocation information is written for lazy | |
| 49 // deoptimization on ARM. | |
| 50 } | |
| 51 | |
| 52 | |
| 53 void Deoptimizer::DeoptimizeFunction(JSFunction* function) { | 47 void Deoptimizer::DeoptimizeFunction(JSFunction* function) { |
| 54 HandleScope scope; | 48 HandleScope scope; |
| 55 AssertNoAllocation no_allocation; | 49 AssertNoAllocation no_allocation; |
| 56 | 50 |
| 57 if (!function->IsOptimized()) return; | 51 if (!function->IsOptimized()) return; |
| 58 | 52 |
| 59 // Get the optimized code. | 53 // Get the optimized code. |
| 60 Code* code = function->code(); | 54 Code* code = function->code(); |
| 55 Address code_start_address = code->instruction_start(); |
| 61 | 56 |
| 62 // Invalidate the relocation information, as it will become invalid by the | 57 // Invalidate the relocation information, as it will become invalid by the |
| 63 // code patching below, and is not needed any more. | 58 // code patching below, and is not needed any more. |
| 64 code->InvalidateRelocation(); | 59 code->InvalidateRelocation(); |
| 65 | 60 |
| 66 // For each return after a safepoint insert an absolute call to the | 61 // For each LLazyBailout instruction insert a call to the corresponding |
| 67 // corresponding deoptimization entry. | 62 // deoptimization entry. |
| 68 unsigned last_pc_offset = 0; | 63 DeoptimizationInputData* deopt_data = |
| 69 SafepointTable table(function->code()); | 64 DeoptimizationInputData::cast(code->deoptimization_data()); |
| 70 for (unsigned i = 0; i < table.length(); i++) { | |
| 71 unsigned pc_offset = table.GetPcOffset(i); | |
| 72 SafepointEntry safepoint_entry = table.GetEntry(i); | |
| 73 int deoptimization_index = safepoint_entry.deoptimization_index(); | |
| 74 int gap_code_size = safepoint_entry.gap_code_size(); | |
| 75 // Check that we did not shoot past next safepoint. | |
| 76 CHECK(pc_offset >= last_pc_offset); | |
| 77 #ifdef DEBUG | 65 #ifdef DEBUG |
| 78 // Destroy the code which is not supposed to be run again. | 66 Address prev_call_address = NULL; |
| 79 int instructions = (pc_offset - last_pc_offset) / Assembler::kInstrSize; | |
| 80 CodePatcher destroyer(code->instruction_start() + last_pc_offset, | |
| 81 instructions); | |
| 82 for (int x = 0; x < instructions; x++) { | |
| 83 destroyer.masm()->bkpt(0); | |
| 84 } | |
| 85 #endif | 67 #endif |
| 86 last_pc_offset = pc_offset; | 68 for (int i = 0; i < deopt_data->DeoptCount(); i++) { |
| 87 if (deoptimization_index != Safepoint::kNoDeoptimizationIndex) { | 69 if (deopt_data->Pc(i)->value() == -1) continue; |
| 88 Address deoptimization_entry = Deoptimizer::GetDeoptimizationEntry( | 70 Address call_address = code_start_address + deopt_data->Pc(i)->value(); |
| 89 deoptimization_index, Deoptimizer::LAZY); | 71 Address deopt_entry = GetDeoptimizationEntry(i, LAZY); |
| 90 last_pc_offset += gap_code_size; | 72 int call_size_in_bytes = MacroAssembler::CallSize(deopt_entry, |
| 91 int call_size_in_bytes = MacroAssembler::CallSize(deoptimization_entry, | 73 RelocInfo::NONE); |
| 92 RelocInfo::NONE); | 74 int call_size_in_words = call_size_in_bytes / Assembler::kInstrSize; |
| 93 int call_size_in_words = call_size_in_bytes / Assembler::kInstrSize; | 75 ASSERT(call_size_in_bytes % Assembler::kInstrSize == 0); |
| 94 ASSERT(call_size_in_bytes % Assembler::kInstrSize == 0); | 76 ASSERT(call_size_in_bytes <= patch_size()); |
| 95 ASSERT(call_size_in_bytes <= patch_size()); | 77 CodePatcher patcher(call_address, call_size_in_words); |
| 96 CodePatcher patcher(code->instruction_start() + last_pc_offset, | 78 patcher.masm()->Call(deopt_entry, RelocInfo::NONE); |
| 97 call_size_in_words); | 79 ASSERT(prev_call_address == NULL || |
| 98 patcher.masm()->Call(deoptimization_entry, RelocInfo::NONE); | 80 call_address >= prev_call_address + patch_size()); |
| 99 last_pc_offset += call_size_in_bytes; | 81 ASSERT(call_address + patch_size() <= code->instruction_end()); |
| 100 } | 82 #ifdef DEBUG |
| 83 prev_call_address = call_address; |
| 84 #endif |
| 101 } | 85 } |
| 102 | 86 |
| 103 #ifdef DEBUG | |
| 104 // Destroy the code which is not supposed to be run again. | |
| 105 int instructions = | |
| 106 (code->safepoint_table_offset() - last_pc_offset) / Assembler::kInstrSize; | |
| 107 CodePatcher destroyer(code->instruction_start() + last_pc_offset, | |
| 108 instructions); | |
| 109 for (int x = 0; x < instructions; x++) { | |
| 110 destroyer.masm()->bkpt(0); | |
| 111 } | |
| 112 #endif | |
| 113 | |
| 114 Isolate* isolate = code->GetIsolate(); | 87 Isolate* isolate = code->GetIsolate(); |
| 115 | 88 |
| 116 // Add the deoptimizing code to the list. | 89 // Add the deoptimizing code to the list. |
| 117 DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code); | 90 DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code); |
| 118 DeoptimizerData* data = isolate->deoptimizer_data(); | 91 DeoptimizerData* data = isolate->deoptimizer_data(); |
| 119 node->set_next(data->deoptimizing_code_list_); | 92 node->set_next(data->deoptimizing_code_list_); |
| 120 data->deoptimizing_code_list_ = node; | 93 data->deoptimizing_code_list_ = node; |
| 121 | 94 |
| 122 // We might be in the middle of incremental marking with compaction. | 95 // We might be in the middle of incremental marking with compaction. |
| 123 // Tell collector to treat this code object in a special way and | 96 // Tell collector to treat this code object in a special way and |
| 124 // ignore all slots that might have been recorded on it. | 97 // ignore all slots that might have been recorded on it. |
| 125 isolate->heap()->mark_compact_collector()->InvalidateCode(code); | 98 isolate->heap()->mark_compact_collector()->InvalidateCode(code); |
| 126 | 99 |
| 127 // Set the code for the function to non-optimized version. | 100 // Set the code for the function to non-optimized version. |
| 128 function->ReplaceCode(function->shared()->code()); | 101 function->ReplaceCode(function->shared()->code()); |
| 129 | 102 |
| 130 if (FLAG_trace_deopt) { | 103 if (FLAG_trace_deopt) { |
| 131 PrintF("[forced deoptimization: "); | 104 PrintF("[forced deoptimization: "); |
| 132 function->PrintName(); | 105 function->PrintName(); |
| 133 PrintF(" / %x]\n", reinterpret_cast<uint32_t>(function)); | 106 PrintF(" / %x]\n", reinterpret_cast<uint32_t>(function)); |
| 134 #ifdef DEBUG | |
| 135 if (FLAG_print_code) { | |
| 136 code->PrintLn(); | |
| 137 } | |
| 138 #endif | |
| 139 } | 107 } |
| 140 } | 108 } |
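The rewritten DeoptimizeFunction no longer walks the safepoint table looking for returns after safepoints; it iterates the code object's DeoptimizationInputData, and every entry with a recorded pc (Pc(i) != -1) marks an LLazyBailout pad that gets overwritten with a call to the matching lazy deoptimization entry. The new ASSERTs encode two invariants: patch sites are visited in ascending order at least patch_size() bytes apart, and each site ends before instruction_end(). A minimal sketch of those invariants with hypothetical names, separate from the V8 sources:

    // Hypothetical illustration of the patch-site invariants (not V8 code).
    #include <cassert>
    #include <cstdint>
    #include <vector>

    void CheckPatchSites(const std::vector<uintptr_t>& call_addresses,
                         uintptr_t instruction_end, int patch_size) {
      uintptr_t prev = 0;
      for (uintptr_t addr : call_addresses) {
        // Sites arrive in ascending order and may not overlap.
        assert(prev == 0 || addr >= prev + patch_size);
        // Each patched call must lie entirely inside the code object.
        assert(addr + patch_size <= instruction_end);
        prev = addr;
      }
    }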
| 141 | 109 |
| 142 | 110 |
| 143 void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code, | 111 void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code, |
| 144 Address pc_after, | 112 Address pc_after, |
| 145 Code* check_code, | 113 Code* check_code, |
| 146 Code* replacement_code) { | 114 Code* replacement_code) { |
| 147 const int kInstrSize = Assembler::kInstrSize; | 115 const int kInstrSize = Assembler::kInstrSize; |
| 148 // The call of the stack guard check has the following form: | 116 // The call of the stack guard check has the following form: |
| (...skipping 636 matching lines...) |
| 785 __ push(ip); | 753 __ push(ip); |
| 786 __ b(&done); | 754 __ b(&done); |
| 787 ASSERT(masm()->pc_offset() - start == table_entry_size_); | 755 ASSERT(masm()->pc_offset() - start == table_entry_size_); |
| 788 } | 756 } |
| 789 __ bind(&done); | 757 __ bind(&done); |
| 790 } | 758 } |
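The visible tail of the diff is the end of the deoptimization entry table generator: each entry pushes its index and branches to the shared done label, and the ASSERT checks that every entry occupies exactly table_entry_size_ (16 bytes, four ARM instructions). Fixed-size entries are what let an entry's address be computed with a multiply-add from the table base; a sketch of that addressing, with hypothetical names:

    // Hypothetical illustration (not V8 code): fixed-size entries reduce
    // entry lookup to a multiply-add from the table base.
    #include <cstdint>

    const int kTableEntrySize = 16;  // mirrors Deoptimizer::table_entry_size_

    uintptr_t DeoptEntryAddress(uintptr_t table_base, int deopt_index) {
      return table_base + deopt_index * kTableEntrySize;
    }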
| 791 | 759 |
| 792 #undef __ | 760 #undef __ |
| 793 | 761 |
| 794 } } // namespace v8::internal | 762 } } // namespace v8::internal |