OLD | NEW |
---|---|
1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 27 matching lines...) Expand all Loading... | |
38 namespace internal { | 38 namespace internal { |
39 | 39 |
40 int Deoptimizer::table_entry_size_ = 10; | 40 int Deoptimizer::table_entry_size_ = 10; |
41 | 41 |
42 | 42 |
43 int Deoptimizer::patch_size() { | 43 int Deoptimizer::patch_size() { |
44 return Assembler::kCallInstructionLength; | 44 return Assembler::kCallInstructionLength; |
45 } | 45 } |
46 | 46 |
47 | 47 |
48 static void ZapCodeRange(Address start, Address end) { | |
49 #ifdef DEBUG | |
50 ASSERT(start <= end); | |
51 int size = end - start; | |
52 CodePatcher destroyer(start, size); | |
53 while (size-- > 0) destroyer.masm()->int3(); | |
54 #endif | |
55 } | |
56 | |
57 | |
48 void Deoptimizer::DeoptimizeFunction(JSFunction* function) { | 58 void Deoptimizer::DeoptimizeFunction(JSFunction* function) { |
49 AssertNoAllocation no_allocation; | 59 AssertNoAllocation no_allocation; |
50 | 60 |
51 if (!function->IsOptimized()) return; | 61 if (!function->IsOptimized()) return; |
52 | 62 |
53 // Get the optimized code. | 63 // Get the optimized code. |
54 Code* code = function->code(); | 64 Code* code = function->code(); |
55 | 65 Address code_start_address = code->instruction_start(); |
56 // For each return after a safepoint insert an absolute call to the | |
57 // corresponding deoptimization entry. | |
58 unsigned last_pc_offset = 0; | |
59 SafepointTable table(function->code()); | |
60 | 66 |
61 // We will overwrite the code's relocation info in-place. Relocation info | 67 // We will overwrite the code's relocation info in-place. Relocation info |
62 // is written backward. The relocation info is the payload of a byte array. | 68 // is written backward. The relocation info is the payload of a byte array. |
63 // Later on we will align this at the start of the byte array and create | 69 // Later on we will align this at the start of the byte array and create |
64 // a trash byte array of the remaining space. | 70 // a trash byte array of the remaining space. |
65 ByteArray* reloc_info = code->relocation_info(); | 71 ByteArray* reloc_info = code->relocation_info(); |
66 Address end_address = reloc_info->address() + reloc_info->Size(); | 72 Address reloc_end_address = reloc_info->address() + reloc_info->Size(); |
67 RelocInfoWriter reloc_info_writer(end_address, code->instruction_start()); | 73 RelocInfoWriter reloc_info_writer(reloc_end_address, code_start_address); |
68 | 74 |
69 for (unsigned i = 0; i < table.length(); i++) { | 75 // For each return after a safepoint insert an absolute call to the |
70 unsigned pc_offset = table.GetPcOffset(i); | 76 // corresponding deoptimization entry. |
77 SafepointTable table(code); | |
78 Address prev_address = code_start_address; | |
79 for (unsigned i = 0; i < table.length(); ++i) { | |
80 Address curr_address = code_start_address + table.GetPcOffset(i); | |
81 ZapCodeRange(prev_address, curr_address); | |
82 | |
71 SafepointEntry safepoint_entry = table.GetEntry(i); | 83 SafepointEntry safepoint_entry = table.GetEntry(i); |
72 int deoptimization_index = safepoint_entry.deoptimization_index(); | 84 int deoptimization_index = safepoint_entry.deoptimization_index(); |
73 int gap_code_size = safepoint_entry.gap_code_size(); | 85 if (deoptimization_index != Safepoint::kNoDeoptimizationIndex) { |
74 #ifdef DEBUG | 86 // The gap code is needed to get to the state expected at the bailout. |
75 // Destroy the code which is not supposed to run again. | 87 curr_address += safepoint_entry.gap_code_size(); |
76 unsigned instructions = pc_offset - last_pc_offset; | 88 |
77 CodePatcher destroyer(code->instruction_start() + last_pc_offset, | 89 CodePatcher patcher(curr_address, patch_size()); |
78 instructions); | 90 Address deopt_entry = GetDeoptimizationEntry(deoptimization_index, LAZY); |
79 for (unsigned i = 0; i < instructions; i++) { | 91 patcher.masm()->call(deopt_entry, RelocInfo::NONE); |
80 destroyer.masm()->int3(); | 92 |
93 RelocInfo rinfo(curr_address + 1, // address is 1 after the call opcode | |
Rico
2011/02/03 09:44:26
Period at end of comment
Kevin Millikin (Chromium)
2011/02/04 11:48:54
I didn't intend it to be a complete sentence, but
| |
94 RelocInfo::RUNTIME_ENTRY, | |
Rico
2011/02/03 09:44:26
Maybe we should have a comment stating: "We use Re
Kevin Millikin (Chromium)
2011/02/04 11:48:54
OK.
| |
95 reinterpret_cast<intptr_t>(deopt_entry)); | |
96 reloc_info_writer.Write(&rinfo); | |
97 | |
98 curr_address += patch_size(); | |
81 } | 99 } |
82 #endif | 100 prev_address = curr_address; |
83 last_pc_offset = pc_offset; | |
84 if (deoptimization_index != Safepoint::kNoDeoptimizationIndex) { | |
85 last_pc_offset += gap_code_size; | |
86 Address call_pc = code->instruction_start() + last_pc_offset; | |
87 CodePatcher patcher(call_pc, patch_size()); | |
88 Address entry = GetDeoptimizationEntry(deoptimization_index, LAZY); | |
89 patcher.masm()->call(entry, RelocInfo::NONE); | |
90 last_pc_offset += patch_size(); | |
91 RelocInfo rinfo(call_pc + 1, RelocInfo::RUNTIME_ENTRY, | |
92 reinterpret_cast<intptr_t>(entry)); | |
93 reloc_info_writer.Write(&rinfo); | |
94 } | |
95 } | 101 } |
96 #ifdef DEBUG | 102 ZapCodeRange(prev_address, |
97 // Destroy the code which is not supposed to run again. | 103 code_start_address + code->safepoint_table_offset()); |
98 unsigned instructions = code->safepoint_table_start() - last_pc_offset; | |
99 CodePatcher destroyer(code->instruction_start() + last_pc_offset, | |
100 instructions); | |
101 for (unsigned i = 0; i < instructions; i++) { | |
102 destroyer.masm()->int3(); | |
103 } | |
104 #endif | |
105 | 104 |
106 // Move the relocation info to the beginning of the byte array. | 105 // Move the relocation info to the beginning of the byte array. |
107 int reloc_size = end_address - reloc_info_writer.pos(); | 106 int new_reloc_size = reloc_end_address - reloc_info_writer.pos(); |
108 memmove(code->relocation_start(), reloc_info_writer.pos(), reloc_size); | 107 memmove(code->relocation_start(), reloc_info_writer.pos(), new_reloc_size); |
109 | 108 |
110 // The relocation info is in place, update the size. | 109 // The relocation info is in place, update the size. |
111 reloc_info->set_length(reloc_size); | 110 reloc_info->set_length(new_reloc_size); |
112 | 111 |
113 // Handle the junk part after the new relocation info. We will create | 112 // Handle the junk part after the new relocation info. We will create |
114 // a non-live object in the extra space at the end of the former reloc info. | 113 // a non-live object in the extra space at the end of the former reloc info. |
115 Address junk = reloc_info->address() + reloc_info->Size(); | 114 Address junk_address = reloc_info->address() + reloc_info->Size(); |
116 ASSERT(junk <= end_address); | 115 ASSERT(junk_address <= reloc_end_address); |
117 | 116 Heap::CreateFillerObjectAt(junk_address, reloc_end_address - junk_address); |
118 if (end_address - junk <= ByteArray::kHeaderSize) { | |
119 // We get in here if there is not enough space for a ByteArray. | |
120 | |
121 // Both addresses are kPointerSize aligned. | |
122 CHECK_EQ((end_address - junk) % 4, 0); | |
123 Map* filler_map = Heap::one_pointer_filler_map(); | |
124 while (junk < end_address) { | |
125 HeapObject::FromAddress(junk)->set_map(filler_map); | |
126 junk += kPointerSize; | |
127 } | |
128 } else { | |
129 int size = end_address - junk; | |
130 // Since the reloc_end address and junk are both aligned, we should | |
131 // never have junk which is not a multiple of kPointerSize. | |
132 CHECK_EQ(size % kPointerSize, 0); | |
133 CHECK_GT(size, 0); | |
134 HeapObject* junk_object = HeapObject::FromAddress(junk); | |
135 junk_object->set_map(Heap::byte_array_map()); | |
136 int length = ByteArray::LengthFor(end_address - junk); | |
137 ByteArray::cast(junk_object)->set_length(length); | |
138 } | |
139 | 117 |
140 // Add the deoptimizing code to the list. | 118 // Add the deoptimizing code to the list. |
141 DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code); | 119 DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code); |
142 node->set_next(deoptimizing_code_list_); | 120 node->set_next(deoptimizing_code_list_); |
143 deoptimizing_code_list_ = node; | 121 deoptimizing_code_list_ = node; |
144 | 122 |
145 // Set the code for the function to non-optimized version. | 123 // Set the code for the function to non-optimized version. |
146 function->ReplaceCode(function->shared()->code()); | 124 function->ReplaceCode(function->shared()->code()); |
147 | 125 |
148 if (FLAG_trace_deopt) { | 126 if (FLAG_trace_deopt) { |
(...skipping 521 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
670 } | 648 } |
671 __ bind(&done); | 649 __ bind(&done); |
672 } | 650 } |
673 | 651 |
674 #undef __ | 652 #undef __ |
675 | 653 |
676 | 654 |
677 } } // namespace v8::internal | 655 } } // namespace v8::internal |
678 | 656 |
679 #endif // V8_TARGET_ARCH_IA32 | 657 #endif // V8_TARGET_ARCH_IA32 |
OLD | NEW |