Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(41)

Side by Side Diff: src/ia32/deoptimizer-ia32.cc

Issue 6606002: Merge revision 6500-6600 from bleeding_edge to the isolates branch. (Closed) Base URL: http://v8.googlecode.com/svn/branches/experimental/isolates/
Patch Set: '' Created 9 years, 9 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
OLDNEW
1 // Copyright 2011 the V8 project authors. All rights reserved. 1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 19 matching lines...) Expand all
30 #if defined(V8_TARGET_ARCH_IA32) 30 #if defined(V8_TARGET_ARCH_IA32)
31 31
32 #include "codegen.h" 32 #include "codegen.h"
33 #include "deoptimizer.h" 33 #include "deoptimizer.h"
34 #include "full-codegen.h" 34 #include "full-codegen.h"
35 #include "safepoint-table.h" 35 #include "safepoint-table.h"
36 36
37 namespace v8 { 37 namespace v8 {
38 namespace internal { 38 namespace internal {
39 39
40 int Deoptimizer::table_entry_size_ = 10;
40 41
41 int Deoptimizer::table_entry_size_ = 10; 42
43 int Deoptimizer::patch_size() {
44 return Assembler::kCallInstructionLength;
45 }
46
42 47
43 void Deoptimizer::DeoptimizeFunction(JSFunction* function) { 48 void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
44 AssertNoAllocation no_allocation; 49 AssertNoAllocation no_allocation;
45 50
46 if (!function->IsOptimized()) return; 51 if (!function->IsOptimized()) return;
47 52
48 // Get the optimized code. 53 // Get the optimized code.
49 Code* code = function->code(); 54 Code* code = function->code();
50 55
51 // Invalidate the relocation information, as it will become invalid by the
52 // code patching below, and is not needed any more.
53 code->InvalidateRelocation();
54
55 // For each return after a safepoint insert a absolute call to the 56 // For each return after a safepoint insert a absolute call to the
56 // corresponding deoptimization entry. 57 // corresponding deoptimization entry.
57 unsigned last_pc_offset = 0; 58 unsigned last_pc_offset = 0;
58 SafepointTable table(function->code()); 59 SafepointTable table(function->code());
60
61 // We will overwrite the code's relocation info in-place. Relocation info
62 // is written backward. The relocation info is the payload of a byte array.
63 // Later on we will align this at the start of the byte array and create
64 // a trash byte array of the remaining space.
65 ByteArray* reloc_info = code->relocation_info();
66 Address end_address = reloc_info->address() + reloc_info->Size();
67 RelocInfoWriter reloc_info_writer(end_address, code->instruction_start());
68
59 for (unsigned i = 0; i < table.length(); i++) { 69 for (unsigned i = 0; i < table.length(); i++) {
60 unsigned pc_offset = table.GetPcOffset(i); 70 unsigned pc_offset = table.GetPcOffset(i);
61 SafepointEntry safepoint_entry = table.GetEntry(i); 71 SafepointEntry safepoint_entry = table.GetEntry(i);
62 int deoptimization_index = safepoint_entry.deoptimization_index(); 72 int deoptimization_index = safepoint_entry.deoptimization_index();
63 int gap_code_size = safepoint_entry.gap_code_size(); 73 int gap_code_size = safepoint_entry.gap_code_size();
64 #ifdef DEBUG 74 #ifdef DEBUG
65 // Destroy the code which is not supposed to run again. 75 // Destroy the code which is not supposed to run again.
66 unsigned instructions = pc_offset - last_pc_offset; 76 unsigned instructions = pc_offset - last_pc_offset;
67 CodePatcher destroyer(code->instruction_start() + last_pc_offset, 77 CodePatcher destroyer(code->instruction_start() + last_pc_offset,
68 instructions); 78 instructions);
69 for (unsigned i = 0; i < instructions; i++) { 79 for (unsigned i = 0; i < instructions; i++) {
70 destroyer.masm()->int3(); 80 destroyer.masm()->int3();
71 } 81 }
72 #endif 82 #endif
73 last_pc_offset = pc_offset; 83 last_pc_offset = pc_offset;
74 if (deoptimization_index != Safepoint::kNoDeoptimizationIndex) { 84 if (deoptimization_index != Safepoint::kNoDeoptimizationIndex) {
75 CodePatcher patcher( 85 last_pc_offset += gap_code_size;
76 code->instruction_start() + pc_offset + gap_code_size, 86 Address call_pc = code->instruction_start() + last_pc_offset;
77 Assembler::kCallInstructionLength); 87 CodePatcher patcher(call_pc, patch_size());
78 patcher.masm()->call(GetDeoptimizationEntry(deoptimization_index, LAZY), 88 Address entry = GetDeoptimizationEntry(deoptimization_index, LAZY);
79 RelocInfo::NONE); 89 patcher.masm()->call(entry, RelocInfo::NONE);
80 last_pc_offset += gap_code_size + Assembler::kCallInstructionLength; 90 last_pc_offset += patch_size();
91 RelocInfo rinfo(call_pc + 1, RelocInfo::RUNTIME_ENTRY,
92 reinterpret_cast<intptr_t>(entry));
93 reloc_info_writer.Write(&rinfo);
81 } 94 }
82 } 95 }
83 #ifdef DEBUG 96 #ifdef DEBUG
84 // Destroy the code which is not supposed to run again. 97 // Destroy the code which is not supposed to run again.
85 unsigned instructions = code->safepoint_table_start() - last_pc_offset; 98 unsigned instructions = code->safepoint_table_start() - last_pc_offset;
86 CodePatcher destroyer(code->instruction_start() + last_pc_offset, 99 CodePatcher destroyer(code->instruction_start() + last_pc_offset,
87 instructions); 100 instructions);
88 for (unsigned i = 0; i < instructions; i++) { 101 for (unsigned i = 0; i < instructions; i++) {
89 destroyer.masm()->int3(); 102 destroyer.masm()->int3();
90 } 103 }
91 #endif 104 #endif
92 105
106 // Move the relocation info to the beginning of the byte array.
107 int reloc_size = end_address - reloc_info_writer.pos();
108 memmove(code->relocation_start(), reloc_info_writer.pos(), reloc_size);
109
110 // The relocation info is in place, update the size.
111 reloc_info->set_length(reloc_size);
112
113 // Handle the junk part after the new relocation info. We will create
114 // a non-live object in the extra space at the end of the former reloc info.
115 Address junk = reloc_info->address() + reloc_info->Size();
116 ASSERT(junk <= end_address);
117
118 if (end_address - junk <= ByteArray::kHeaderSize) {
119 // We get in here if there is not enough space for a ByteArray.
120
121 // Both addresses are kPointerSize aligned.
122 CHECK_EQ((end_address - junk) % 4, 0);
123 Map* filler_map = HEAP->one_pointer_filler_map();
124 while (junk < end_address) {
125 HeapObject::FromAddress(junk)->set_map(filler_map);
126 junk += kPointerSize;
127 }
128 } else {
129 int size = end_address - junk;
130 // Since the reloc_end address and junk are both aligned, we should
131 // never have junk which is not a multiple of kPointerSize.
132 CHECK_EQ(size % kPointerSize, 0);
133 CHECK_GT(size, 0);
134 HeapObject* junk_object = HeapObject::FromAddress(junk);
135 junk_object->set_map(HEAP->byte_array_map());
136 int length = ByteArray::LengthFor(end_address - junk);
137 ByteArray::cast(junk_object)->set_length(length);
138 }
139
93 // Add the deoptimizing code to the list. 140 // Add the deoptimizing code to the list.
94 DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code); 141 DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code);
95 DeoptimizerData* data = Isolate::Current()->deoptimizer_data(); 142 DeoptimizerData* data = Isolate::Current()->deoptimizer_data();
96 node->set_next(data->deoptimizing_code_list_); 143 node->set_next(data->deoptimizing_code_list_);
97 data->deoptimizing_code_list_ = node; 144 data->deoptimizing_code_list_ = node;
98 145
99 // Set the code for the function to non-optimized version. 146 // Set the code for the function to non-optimized version.
100 function->ReplaceCode(function->shared()->code()); 147 function->ReplaceCode(function->shared()->code());
101 148
102 if (FLAG_trace_deopt) { 149 if (FLAG_trace_deopt) {
103 PrintF("[forced deoptimization: "); 150 PrintF("[forced deoptimization: ");
104 function->PrintName(); 151 function->PrintName();
105 PrintF(" / %x]\n", reinterpret_cast<uint32_t>(function)); 152 PrintF(" / %x]\n", reinterpret_cast<uint32_t>(function));
106 } 153 }
107 } 154 }
108 155
109 156
110 void Deoptimizer::PatchStackCheckCode(Code* unoptimized_code, 157 void Deoptimizer::PatchStackCheckCodeAt(Address pc_after,
111 Code* check_code, 158 Code* check_code,
112 Code* replacement_code) { 159 Code* replacement_code) {
113 // Iterate the unoptimized code and patch every stack check except at 160 Address call_target_address = pc_after - kPointerSize;
114 // the function entry. This code assumes the function entry stack 161 ASSERT(check_code->entry() ==
115 // check appears first i.e., is not deferred or otherwise reordered. 162 Assembler::target_address_at(call_target_address));
116 ASSERT(unoptimized_code->kind() == Code::FUNCTION); 163 // The stack check code matches the pattern:
117 bool first = true; 164 //
118 for (RelocIterator it(unoptimized_code, RelocInfo::kCodeTargetMask); 165 // cmp esp, <limit>
119 !it.done(); 166 // jae ok
120 it.next()) { 167 // call <stack guard>
121 RelocInfo* rinfo = it.rinfo(); 168 // test eax, <loop nesting depth>
122 if (rinfo->target_address() == Code::cast(check_code)->entry()) { 169 // ok: ...
123 if (first) { 170 //
124 first = false; 171 // We will patch away the branch so the code is:
125 } else { 172 //
126 // The stack check code matches the pattern: 173 // cmp esp, <limit> ;; Not changed
127 // 174 // nop
128 // cmp esp, <limit> 175 // nop
129 // jae ok 176 // call <on-stack replacement>
130 // call <stack guard> 177 // test eax, <loop nesting depth>
131 // test eax, <loop nesting depth> 178 // ok:
132 // ok: ... 179 ASSERT(*(call_target_address - 3) == 0x73 && // jae
133 // 180 *(call_target_address - 2) == 0x07 && // offset
134 // We will patch away the branch so the code is: 181 *(call_target_address - 1) == 0xe8); // call
135 // 182 *(call_target_address - 3) = 0x90; // nop
136 // cmp esp, <limit> ;; Not changed 183 *(call_target_address - 2) = 0x90; // nop
137 // nop 184 Assembler::set_target_address_at(call_target_address,
138 // nop 185 replacement_code->entry());
139 // call <on-stack replacement>
140 // test eax, <loop nesting depth>
141 // ok:
142 Address call_target_address = rinfo->pc();
143 ASSERT(*(call_target_address - 3) == 0x73 && // jae
144 *(call_target_address - 2) == 0x07 && // offset
145 *(call_target_address - 1) == 0xe8); // call
146 *(call_target_address - 3) = 0x90; // nop
147 *(call_target_address - 2) = 0x90; // nop
148 rinfo->set_target_address(replacement_code->entry());
149 }
150 }
151 }
152 } 186 }
153 187
154 188
155 void Deoptimizer::RevertStackCheckCode(Code* unoptimized_code, 189 void Deoptimizer::RevertStackCheckCodeAt(Address pc_after,
156 Code* check_code, 190 Code* check_code,
157 Code* replacement_code) { 191 Code* replacement_code) {
158 // Iterate the unoptimized code and revert all the patched stack checks. 192 Address call_target_address = pc_after - kPointerSize;
159 for (RelocIterator it(unoptimized_code, RelocInfo::kCodeTargetMask); 193 ASSERT(replacement_code->entry() ==
160 !it.done(); 194 Assembler::target_address_at(call_target_address));
161 it.next()) { 195 // Replace the nops from patching (Deoptimizer::PatchStackCheckCode) to
162 RelocInfo* rinfo = it.rinfo(); 196 // restore the conditional branch.
163 if (rinfo->target_address() == replacement_code->entry()) { 197 ASSERT(*(call_target_address - 3) == 0x90 && // nop
164 // Replace the nops from patching (Deoptimizer::PatchStackCheckCode) to 198 *(call_target_address - 2) == 0x90 && // nop
165 // restore the conditional branch. 199 *(call_target_address - 1) == 0xe8); // call
166 Address call_target_address = rinfo->pc(); 200 *(call_target_address - 3) = 0x73; // jae
167 ASSERT(*(call_target_address - 3) == 0x90 && // nop 201 *(call_target_address - 2) = 0x07; // offset
168 *(call_target_address - 2) == 0x90 && // nop 202 Assembler::set_target_address_at(call_target_address,
169 *(call_target_address - 1) == 0xe8); // call 203 check_code->entry());
170 *(call_target_address - 3) = 0x73; // jae
171 *(call_target_address - 2) = 0x07; // offset
172 rinfo->set_target_address(check_code->entry());
173 }
174 }
175 } 204 }
176 205
177 206
178 static int LookupBailoutId(DeoptimizationInputData* data, unsigned ast_id) { 207 static int LookupBailoutId(DeoptimizationInputData* data, unsigned ast_id) {
179 ByteArray* translations = data->TranslationByteArray(); 208 ByteArray* translations = data->TranslationByteArray();
180 int length = data->DeoptCount(); 209 int length = data->DeoptCount();
181 for (int i = 0; i < length; i++) { 210 for (int i = 0; i < length; i++) {
182 if (static_cast<unsigned>(data->AstId(i)->value()) == ast_id) { 211 if (static_cast<unsigned>(data->AstId(i)->value()) == ast_id) {
183 TranslationIterator it(translations, data->TranslationIndex(i)->value()); 212 TranslationIterator it(translations, data->TranslationIndex(i)->value());
184 int value = it.Next(); 213 int value = it.Next();
(...skipping 459 matching lines...) Expand 10 before | Expand all | Expand 10 after
644 } 673 }
645 __ bind(&done); 674 __ bind(&done);
646 } 675 }
647 676
648 #undef __ 677 #undef __
649 678
650 679
651 } } // namespace v8::internal 680 } } // namespace v8::internal
652 681
653 #endif // V8_TARGET_ARCH_IA32 682 #endif // V8_TARGET_ARCH_IA32
OLDNEW
« src/ast.cc ('K') | « src/ia32/codegen-ia32.cc ('k') | src/ia32/full-codegen-ia32.cc » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698