Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(219)

Side by Side Diff: src/ia32/deoptimizer-ia32.cc

Issue 6606006: [Isolates] Merge 6500:6700 from bleeding_edge to isolates. (Closed) Base URL: http://v8.googlecode.com/svn/branches/experimental/isolates/
Patch Set: '' Created 9 years, 9 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « src/ia32/codegen-ia32.cc ('k') | src/ia32/disasm-ia32.cc » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright 2011 the V8 project authors. All rights reserved. 1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 19 matching lines...) Expand all
30 #if defined(V8_TARGET_ARCH_IA32) 30 #if defined(V8_TARGET_ARCH_IA32)
31 31
32 #include "codegen.h" 32 #include "codegen.h"
33 #include "deoptimizer.h" 33 #include "deoptimizer.h"
34 #include "full-codegen.h" 34 #include "full-codegen.h"
35 #include "safepoint-table.h" 35 #include "safepoint-table.h"
36 36
37 namespace v8 { 37 namespace v8 {
38 namespace internal { 38 namespace internal {
39 39
40 int Deoptimizer::table_entry_size_ = 10;
40 41
41 int Deoptimizer::table_entry_size_ = 10; 42
43 int Deoptimizer::patch_size() {
44 return Assembler::kCallInstructionLength;
45 }
46
47
48 static void ZapCodeRange(Address start, Address end) {
49 #ifdef DEBUG
50 ASSERT(start <= end);
51 int size = end - start;
52 CodePatcher destroyer(start, size);
53 while (size-- > 0) destroyer.masm()->int3();
54 #endif
55 }
56
42 57
43 void Deoptimizer::DeoptimizeFunction(JSFunction* function) { 58 void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
44 AssertNoAllocation no_allocation; 59 AssertNoAllocation no_allocation;
45 60
46 if (!function->IsOptimized()) return; 61 if (!function->IsOptimized()) return;
47 62
48 // Get the optimized code. 63 // Get the optimized code.
49 Code* code = function->code(); 64 Code* code = function->code();
65 Address code_start_address = code->instruction_start();
50 66
51 // Invalidate the relocation information, as it will become invalid by the 67 // We will overwrite the code's relocation info in-place. Relocation info
52 // code patching below, and is not needed any more. 68 // is written backward. The relocation info is the payload of a byte
53 code->InvalidateRelocation(); 69 // array. Later on we will slide this to the start of the byte array and
70 // create a filler object in the remaining space.
71 ByteArray* reloc_info = code->relocation_info();
72 Address reloc_end_address = reloc_info->address() + reloc_info->Size();
73 RelocInfoWriter reloc_info_writer(reloc_end_address, code_start_address);
54 74
55 // For each return after a safepoint insert an absolute call to the 75 // For each return after a safepoint insert a call to the corresponding
56 // corresponding deoptimization entry. 76 // deoptimization entry. Since the call is a relative encoding, write new
57 unsigned last_pc_offset = 0; 77 // reloc info. We do not need any of the existing reloc info because the
58 SafepointTable table(function->code()); 78 // existing code will not be used again (we zap it in debug builds).
59 for (unsigned i = 0; i < table.length(); i++) { 79 SafepointTable table(code);
60 unsigned pc_offset = table.GetPcOffset(i); 80 Address prev_address = code_start_address;
81 for (unsigned i = 0; i < table.length(); ++i) {
82 Address curr_address = code_start_address + table.GetPcOffset(i);
83 ZapCodeRange(prev_address, curr_address);
84
61 SafepointEntry safepoint_entry = table.GetEntry(i); 85 SafepointEntry safepoint_entry = table.GetEntry(i);
62 int deoptimization_index = safepoint_entry.deoptimization_index(); 86 int deoptimization_index = safepoint_entry.deoptimization_index();
63 int gap_code_size = safepoint_entry.gap_code_size(); 87 if (deoptimization_index != Safepoint::kNoDeoptimizationIndex) {
64 #ifdef DEBUG 88 // The gap code is needed to get to the state expected at the bailout.
65 // Destroy the code which is not supposed to run again. 89 curr_address += safepoint_entry.gap_code_size();
66 unsigned instructions = pc_offset - last_pc_offset; 90
67 CodePatcher destroyer(code->instruction_start() + last_pc_offset, 91 CodePatcher patcher(curr_address, patch_size());
68 instructions); 92 Address deopt_entry = GetDeoptimizationEntry(deoptimization_index, LAZY);
69 for (unsigned i = 0; i < instructions; i++) { 93 patcher.masm()->call(deopt_entry, RelocInfo::NONE);
70 destroyer.masm()->int3(); 94
95 // We use RUNTIME_ENTRY for deoptimization bailouts.
96 RelocInfo rinfo(curr_address + 1, // 1 after the call opcode.
97 RelocInfo::RUNTIME_ENTRY,
98 reinterpret_cast<intptr_t>(deopt_entry));
99 reloc_info_writer.Write(&rinfo);
100
101 curr_address += patch_size();
71 } 102 }
72 #endif 103 prev_address = curr_address;
73 last_pc_offset = pc_offset;
74 if (deoptimization_index != Safepoint::kNoDeoptimizationIndex) {
75 CodePatcher patcher(
76 code->instruction_start() + pc_offset + gap_code_size,
77 Assembler::kCallInstructionLength);
78 patcher.masm()->call(GetDeoptimizationEntry(deoptimization_index, LAZY),
79 RelocInfo::NONE);
80 last_pc_offset += gap_code_size + Assembler::kCallInstructionLength;
81 }
82 } 104 }
83 #ifdef DEBUG 105 ZapCodeRange(prev_address,
84 // Destroy the code which is not supposed to run again. 106 code_start_address + code->safepoint_table_offset());
85 unsigned instructions = code->safepoint_table_start() - last_pc_offset; 107
86 CodePatcher destroyer(code->instruction_start() + last_pc_offset, 108 // Move the relocation info to the beginning of the byte array.
87 instructions); 109 int new_reloc_size = reloc_end_address - reloc_info_writer.pos();
88 for (unsigned i = 0; i < instructions; i++) { 110 memmove(code->relocation_start(), reloc_info_writer.pos(), new_reloc_size);
89 destroyer.masm()->int3(); 111
90 } 112 // The relocation info is in place, update the size.
91 #endif 113 reloc_info->set_length(new_reloc_size);
114
115 // Handle the junk part after the new relocation info. We will create
116 // a non-live object in the extra space at the end of the former reloc info.
117 Address junk_address = reloc_info->address() + reloc_info->Size();
118 ASSERT(junk_address <= reloc_end_address);
119 HEAP->CreateFillerObjectAt(junk_address, reloc_end_address - junk_address);
92 120
93 // Add the deoptimizing code to the list. 121 // Add the deoptimizing code to the list.
94 DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code); 122 DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code);
95 DeoptimizerData* data = Isolate::Current()->deoptimizer_data(); 123 DeoptimizerData* data = Isolate::Current()->deoptimizer_data();
96 node->set_next(data->deoptimizing_code_list_); 124 node->set_next(data->deoptimizing_code_list_);
97 data->deoptimizing_code_list_ = node; 125 data->deoptimizing_code_list_ = node;
98 126
99 // Set the code for the function to non-optimized version. 127 // Set the code for the function to non-optimized version.
100 function->ReplaceCode(function->shared()->code()); 128 function->ReplaceCode(function->shared()->code());
101 129
102 if (FLAG_trace_deopt) { 130 if (FLAG_trace_deopt) {
103 PrintF("[forced deoptimization: "); 131 PrintF("[forced deoptimization: ");
104 function->PrintName(); 132 function->PrintName();
105 PrintF(" / %x]\n", reinterpret_cast<uint32_t>(function)); 133 PrintF(" / %x]\n", reinterpret_cast<uint32_t>(function));
106 } 134 }
107 } 135 }
108 136
109 137
110 void Deoptimizer::PatchStackCheckCode(Code* unoptimized_code, 138 void Deoptimizer::PatchStackCheckCodeAt(Address pc_after,
111 Code* check_code, 139 Code* check_code,
112 Code* replacement_code) { 140 Code* replacement_code) {
113 // Iterate the unoptimized code and patch every stack check except at 141 Address call_target_address = pc_after - kPointerSize;
114 // the function entry. This code assumes the function entry stack 142 ASSERT(check_code->entry() ==
115 // check appears first i.e., is not deferred or otherwise reordered. 143 Assembler::target_address_at(call_target_address));
116 ASSERT(unoptimized_code->kind() == Code::FUNCTION); 144 // The stack check code matches the pattern:
117 bool first = true; 145 //
118 for (RelocIterator it(unoptimized_code, RelocInfo::kCodeTargetMask); 146 // cmp esp, <limit>
119 !it.done(); 147 // jae ok
120 it.next()) { 148 // call <stack guard>
121 RelocInfo* rinfo = it.rinfo(); 149 // test eax, <loop nesting depth>
122 if (rinfo->target_address() == Code::cast(check_code)->entry()) { 150 // ok: ...
123 if (first) { 151 //
124 first = false; 152 // We will patch away the branch so the code is:
125 } else { 153 //
126 // The stack check code matches the pattern: 154 // cmp esp, <limit> ;; Not changed
127 // 155 // nop
128 // cmp esp, <limit> 156 // nop
129 // jae ok 157 // call <on-stack replacement>
130 // call <stack guard> 158 // test eax, <loop nesting depth>
131 // test eax, <loop nesting depth> 159 // ok:
132 // ok: ... 160 ASSERT(*(call_target_address - 3) == 0x73 && // jae
133 // 161 *(call_target_address - 2) == 0x07 && // offset
134 // We will patch away the branch so the code is: 162 *(call_target_address - 1) == 0xe8); // call
135 // 163 *(call_target_address - 3) = 0x90; // nop
136 // cmp esp, <limit> ;; Not changed 164 *(call_target_address - 2) = 0x90; // nop
137 // nop 165 Assembler::set_target_address_at(call_target_address,
138 // nop 166 replacement_code->entry());
139 // call <on-stack replacement>
140 // test eax, <loop nesting depth>
141 // ok:
142 Address call_target_address = rinfo->pc();
143 ASSERT(*(call_target_address - 3) == 0x73 && // jae
144 *(call_target_address - 2) == 0x07 && // offset
145 *(call_target_address - 1) == 0xe8); // call
146 *(call_target_address - 3) = 0x90; // nop
147 *(call_target_address - 2) = 0x90; // nop
148 rinfo->set_target_address(replacement_code->entry());
149 }
150 }
151 }
152 } 167 }
153 168
154 169
155 void Deoptimizer::RevertStackCheckCode(Code* unoptimized_code, 170 void Deoptimizer::RevertStackCheckCodeAt(Address pc_after,
156 Code* check_code, 171 Code* check_code,
157 Code* replacement_code) { 172 Code* replacement_code) {
158 // Iterate the unoptimized code and revert all the patched stack checks. 173 Address call_target_address = pc_after - kPointerSize;
159 for (RelocIterator it(unoptimized_code, RelocInfo::kCodeTargetMask); 174 ASSERT(replacement_code->entry() ==
160 !it.done(); 175 Assembler::target_address_at(call_target_address));
161 it.next()) { 176 // Replace the nops from patching (Deoptimizer::PatchStackCheckCode) to
162 RelocInfo* rinfo = it.rinfo(); 177 // restore the conditional branch.
163 if (rinfo->target_address() == replacement_code->entry()) { 178 ASSERT(*(call_target_address - 3) == 0x90 && // nop
164 // Replace the nops from patching (Deoptimizer::PatchStackCheckCode) to 179 *(call_target_address - 2) == 0x90 && // nop
165 // restore the conditional branch. 180 *(call_target_address - 1) == 0xe8); // call
166 Address call_target_address = rinfo->pc(); 181 *(call_target_address - 3) = 0x73; // jae
167 ASSERT(*(call_target_address - 3) == 0x90 && // nop 182 *(call_target_address - 2) = 0x07; // offset
168 *(call_target_address - 2) == 0x90 && // nop 183 Assembler::set_target_address_at(call_target_address,
169 *(call_target_address - 1) == 0xe8); // call 184 check_code->entry());
170 *(call_target_address - 3) = 0x73; // jae
171 *(call_target_address - 2) = 0x07; // offset
172 rinfo->set_target_address(check_code->entry());
173 }
174 }
175 } 185 }
176 186
177 187
178 static int LookupBailoutId(DeoptimizationInputData* data, unsigned ast_id) { 188 static int LookupBailoutId(DeoptimizationInputData* data, unsigned ast_id) {
179 ByteArray* translations = data->TranslationByteArray(); 189 ByteArray* translations = data->TranslationByteArray();
180 int length = data->DeoptCount(); 190 int length = data->DeoptCount();
181 for (int i = 0; i < length; i++) { 191 for (int i = 0; i < length; i++) {
182 if (static_cast<unsigned>(data->AstId(i)->value()) == ast_id) { 192 if (static_cast<unsigned>(data->AstId(i)->value()) == ast_id) {
183 TranslationIterator it(translations, data->TranslationIndex(i)->value()); 193 TranslationIterator it(translations, data->TranslationIndex(i)->value());
184 int value = it.Next(); 194 int value = it.Next();
(...skipping 459 matching lines...) Expand 10 before | Expand all | Expand 10 after
644 } 654 }
645 __ bind(&done); 655 __ bind(&done);
646 } 656 }
647 657
648 #undef __ 658 #undef __
649 659
650 660
651 } } // namespace v8::internal 661 } } // namespace v8::internal
652 662
653 #endif // V8_TARGET_ARCH_IA32 663 #endif // V8_TARGET_ARCH_IA32
OLDNEW
« no previous file with comments | « src/ia32/codegen-ia32.cc ('k') | src/ia32/disasm-ia32.cc » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698