OLD | NEW |
1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 27 matching lines...) |
38 | 38 |
39 const int Deoptimizer::table_entry_size_ = 32; | 39 const int Deoptimizer::table_entry_size_ = 32; |
40 | 40 |
41 | 41 |
42 int Deoptimizer::patch_size() { | 42 int Deoptimizer::patch_size() { |
43 const int kCallInstructionSizeInWords = 4; | 43 const int kCallInstructionSizeInWords = 4; |
44 return kCallInstructionSizeInWords * Assembler::kInstrSize; | 44 return kCallInstructionSizeInWords * Assembler::kInstrSize; |
45 } | 45 } |
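
Editor's note: patch_size() is the footprint of the call sequence that DeoptimizeFunction writes over each bailout site. A minimal sketch of the arithmetic, assuming MIPS32's 4-byte instructions and the usual four-instruction indirect-call sequence (the exact sequence is an assumption, not taken from this file):

    // Sketch: footprint of the patched-in call on MIPS32 (assumed values).
    const int kInstrSize = 4;                   // bytes per MIPS instruction
    const int kCallInstructionSizeInWords = 4;  // e.g. lui + ori + jalr + nop
    const int kPatchSizeInBytes =
        kCallInstructionSizeInWords * kInstrSize;  // 16 bytes per call site
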
46 | 46 |
47 | 47 |
48 void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) { | |
49 // Nothing to do. No new relocation information is written for lazy | |
50 // deoptimization on MIPS. | |
51 } | |
52 | |
53 | |
54 void Deoptimizer::DeoptimizeFunction(JSFunction* function) { | 48 void Deoptimizer::DeoptimizeFunction(JSFunction* function) { |
55 HandleScope scope; | 49 HandleScope scope; |
56 AssertNoAllocation no_allocation; | 50 AssertNoAllocation no_allocation; |
57 | 51 |
58 if (!function->IsOptimized()) return; | 52 if (!function->IsOptimized()) return; |
59 | 53 |
60 // Get the optimized code. | 54 // Get the optimized code. |
61 Code* code = function->code(); | 55 Code* code = function->code(); |
| 56 Address code_start_address = code->instruction_start(); |
62 | 57 |
63 // Invalidate the relocation information, as the code patching below | 58 // Invalidate the relocation information, as the code patching below |
64 // will make it stale, and it is not needed any more. | 59 // will make it stale, and it is not needed any more. |
65 code->InvalidateRelocation(); | 60 code->InvalidateRelocation(); |
66 | 61 |
67 // For each return after a safepoint insert an absolute call to the | 62 // For each LLazyBailout instruction insert a call to the corresponding |
68 // corresponding deoptimization entry. | 63 // deoptimization entry. |
69 unsigned last_pc_offset = 0; | 64 DeoptimizationInputData* deopt_data = |
70 SafepointTable table(function->code()); | 65 DeoptimizationInputData::cast(code->deoptimization_data()); |
71 for (unsigned i = 0; i < table.length(); i++) { | |
72 unsigned pc_offset = table.GetPcOffset(i); | |
73 SafepointEntry safepoint_entry = table.GetEntry(i); | |
74 int deoptimization_index = safepoint_entry.deoptimization_index(); | |
75 int gap_code_size = safepoint_entry.gap_code_size(); | |
76 // Check that we did not shoot past next safepoint. | |
77 CHECK(pc_offset >= last_pc_offset); | |
78 #ifdef DEBUG | 66 #ifdef DEBUG |
79 // Destroy the code which is not supposed to be run again. | 67 Address prev_call_address = NULL; |
80 int instructions = (pc_offset - last_pc_offset) / Assembler::kInstrSize; | |
81 CodePatcher destroyer(code->instruction_start() + last_pc_offset, | |
82 instructions); | |
83 for (int x = 0; x < instructions; x++) { | |
84 destroyer.masm()->break_(0); | |
85 } | |
86 #endif | 68 #endif |
87 last_pc_offset = pc_offset; | 69 for (int i = 0; i < deopt_data->DeoptCount(); i++) { |
88 if (deoptimization_index != Safepoint::kNoDeoptimizationIndex) { | 70 if (deopt_data->Pc(i)->value() == -1) continue; |
89 Address deoptimization_entry = Deoptimizer::GetDeoptimizationEntry( | 71 Address call_address = code_start_address + deopt_data->Pc(i)->value(); |
90 deoptimization_index, Deoptimizer::LAZY); | 72 Address deopt_entry = GetDeoptimizationEntry(i, LAZY); |
91 last_pc_offset += gap_code_size; | 73 int call_size_in_bytes = MacroAssembler::CallSize(deopt_entry, |
92 int call_size_in_bytes = MacroAssembler::CallSize(deoptimization_entry, | 74 RelocInfo::NONE); |
93 RelocInfo::NONE); | 75 int call_size_in_words = call_size_in_bytes / Assembler::kInstrSize; |
94 int call_size_in_words = call_size_in_bytes / Assembler::kInstrSize; | 76 ASSERT(call_size_in_bytes % Assembler::kInstrSize == 0); |
95 ASSERT(call_size_in_bytes % Assembler::kInstrSize == 0); | 77 ASSERT(call_size_in_bytes <= patch_size()); |
96 ASSERT(call_size_in_bytes <= patch_size()); | 78 CodePatcher patcher(call_address, call_size_in_words); |
97 CodePatcher patcher(code->instruction_start() + last_pc_offset, | 79 patcher.masm()->Call(deopt_entry, RelocInfo::NONE); |
98 call_size_in_words); | 80 ASSERT(prev_call_address == NULL || |
99 patcher.masm()->Call(deoptimization_entry, RelocInfo::NONE); | 81 call_address >= prev_call_address + patch_size()); |
100 last_pc_offset += call_size_in_bytes; | 82 ASSERT(call_address + patch_size() <= code->instruction_end()); |
101 } | |
102 } | |
103 | 83 |
104 #ifdef DEBUG | 84 #ifdef DEBUG |
105 // Destroy the code which is not supposed to be run again. | 85 prev_call_address = call_address; |
106 int instructions = | 86 #endif |
107 (code->safepoint_table_offset() - last_pc_offset) / Assembler::kInstrSize; | |
108 CodePatcher destroyer(code->instruction_start() + last_pc_offset, | |
109 instructions); | |
110 for (int x = 0; x < instructions; x++) { | |
111 destroyer.masm()->break_(0); | |
112 } | 87 } |
113 #endif | |
114 | 88 |
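
Editor's note: the two ASSERTs at the end of the new loop encode the invariants that make this patching safe: consecutive bailout sites must be at least patch_size() bytes apart, so the patched calls never overwrite each other, and the last patched call must end inside the code object. A minimal standalone model of those checks, with hypothetical offsets in place of the real deoptimization data:

    #include <cassert>
    #include <cstdint>

    int main() {
      const int kPatchSize = 16;             // patch_size() from above
      const uintptr_t code_start = 0x10000;  // hypothetical code object bounds
      const uintptr_t code_end = 0x10100;
      const int pc_offsets[] = {0x20, 0x40, 0x90};  // stand-ins for Pc(i)

      uintptr_t prev_call_address = 0;
      for (int pc : pc_offsets) {
        uintptr_t call_address = code_start + pc;
        // Sites may not overlap and must stay inside the code object.
        assert(prev_call_address == 0 ||
               call_address >= prev_call_address + kPatchSize);
        assert(call_address + kPatchSize <= code_end);
        prev_call_address = call_address;
      }
      return 0;
    }
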
115 Isolate* isolate = code->GetIsolate(); | 89 Isolate* isolate = code->GetIsolate(); |
116 | 90 |
117 // Add the deoptimizing code to the list. | 91 // Add the deoptimizing code to the list. |
118 DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code); | 92 DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code); |
119 DeoptimizerData* data = isolate->deoptimizer_data(); | 93 DeoptimizerData* data = isolate->deoptimizer_data(); |
120 node->set_next(data->deoptimizing_code_list_); | 94 node->set_next(data->deoptimizing_code_list_); |
121 data->deoptimizing_code_list_ = node; | 95 data->deoptimizing_code_list_ = node; |
122 | 96 |
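
Editor's note: the list insertion above is a constant-time prepend onto a singly linked list owned by the isolate, so the most recently deoptimized code sits at the head. A minimal sketch with simplified stand-in types (not the V8 classes):

    struct Node {          // stand-in for DeoptimizingCodeListNode
      void* code;
      Node* next;
    };

    void Prepend(Node** head, Node* node) {
      node->next = *head;  // node->set_next(data->deoptimizing_code_list_)
      *head = node;        // data->deoptimizing_code_list_ = node
    }
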
123 // We might be in the middle of incremental marking with compaction. | 97 // We might be in the middle of incremental marking with compaction. |
(...skipping 672 matching lines...) |
796 | 770 |
797 ASSERT_EQ(table_entry_size_, masm()->pc_offset() - start); | 771 ASSERT_EQ(table_entry_size_, masm()->pc_offset() - start); |
798 } | 772 } |
799 __ bind(&done); | 773 __ bind(&done); |
800 } | 774 } |
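
Editor's note: the ASSERT_EQ above checks that every generated table entry occupies exactly table_entry_size_ (32) bytes. That fixed stride is presumably what lets an entry's address be computed directly from its id, with no lookup table; a sketch of that arithmetic with plain integers (EntryAddress is a hypothetical helper, not the V8 API):

    #include <cstdint>

    // Address of deopt entry `id` in a table whose entries are all
    // padded to the same fixed size.
    uintptr_t EntryAddress(uintptr_t table_base, int id) {
      const int kTableEntrySize = 32;  // table_entry_size_ above
      return table_base + id * kTableEntrySize;
    }
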
801 | 775 |
802 #undef __ | 776 #undef __ |
803 | 777 |
804 | 778 |
805 } } // namespace v8::internal | 779 } } // namespace v8::internal |