OLD | NEW |
---|---|
1 // Copyright 2010 the V8 project authors. All rights reserved. | 1 // Copyright 2010 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 23 matching lines...) Expand all Loading... | |
34 #include "full-codegen.h" | 34 #include "full-codegen.h" |
35 #include "safepoint-table.h" | 35 #include "safepoint-table.h" |
36 | 36 |
37 namespace v8 { | 37 namespace v8 { |
38 namespace internal { | 38 namespace internal { |
39 | 39 |
40 | 40 |
41 int Deoptimizer::table_entry_size_ = 10; | 41 int Deoptimizer::table_entry_size_ = 10; |
42 | 42 |
43 void Deoptimizer::DeoptimizeFunction(JSFunction* function) { | 43 void Deoptimizer::DeoptimizeFunction(JSFunction* function) { |
44 // UNIMPLEMENTED, for now just return. | 44 AssertNoAllocation no_allocation; |
45 return; | 45 |
46 if (!function->IsOptimized()) return; | |
47 | |
48 // Get the optimized code. | |
49 Code* code = function->code(); | |
50 | |
51 // Invalidate the relocation information, as it will become invalid by the | |
52 // code patching below, and is not needed any more. | |
53 code->InvalidateRelocation(); | |
54 | |
55 // For each return after a safepoint insert an absolute call to the | 
56 // corresponding deoptimization entry. | |
57 unsigned last_pc_offset = 0; | |
58 SafepointTable table(function->code()); | |
59 for (unsigned i = 0; i < table.length(); i++) { | |
60 unsigned pc_offset = table.GetPcOffset(i); | |
61 SafepointEntry safepoint_entry = table.GetEntry(i); | |
62 int deoptimization_index = safepoint_entry.deoptimization_index(); | |
63 int gap_code_size = safepoint_entry.gap_code_size(); | |
64 #ifdef DEBUG | |
65 // Destroy the code which is not supposed to run again. | |
66 unsigned instructions = pc_offset - last_pc_offset; | |
67 CodePatcher destroyer(code->instruction_start() + last_pc_offset, | |
68 instructions); | |
69 for (unsigned i = 0; i < instructions; i++) { | |
70 destroyer.masm()->int3(); | |
71 } | |
72 #endif | |
73 last_pc_offset = pc_offset; | |
74 if (deoptimization_index != Safepoint::kNoDeoptimizationIndex) { | |
75 CodePatcher patcher( | |
76 code->instruction_start() + pc_offset + gap_code_size, | |
77 Assembler::kCallInstructionLength); | |
78 patcher.masm()->Call(GetDeoptimizationEntry(deoptimization_index, LAZY), | |
79 RelocInfo::NONE); | |
80 last_pc_offset += gap_code_size + Assembler::kCallInstructionLength; | |
81 } | |
82 } | |
83 #ifdef DEBUG | |
84 // Destroy the code which is not supposed to run again. | |
85 unsigned instructions = code->safepoint_table_start() - last_pc_offset; | |
86 CodePatcher destroyer(code->instruction_start() + last_pc_offset, | |
87 instructions); | |
88 for (unsigned i = 0; i < instructions; i++) { | |
89 destroyer.masm()->int3(); | |
90 } | |
91 #endif | |
92 | |
93 // Add the deoptimizing code to the list. | |
94 DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code); | |
95 node->set_next(deoptimizing_code_list_); | |
96 deoptimizing_code_list_ = node; | |
97 | |
98 // Set the code for the function to non-optimized version. | |
99 function->ReplaceCode(function->shared()->code()); | |
100 | |
101 if (FLAG_trace_deopt) { | |
102 PrintF("[forced deoptimization: "); | |
103 function->PrintName(); | |
104 PrintF(" / %" V8PRIxPTR "]\n", reinterpret_cast<intptr_t>(function)); | |
105 } | |
46 } | 106 } |
47 | 107 |
48 | 108 |
49 void Deoptimizer::PatchStackCheckCode(RelocInfo* rinfo, | 109 void Deoptimizer::PatchStackCheckCode(RelocInfo* rinfo, |
50 Code* replacement_code) { | 110 Code* replacement_code) { |
51 UNIMPLEMENTED(); | 111 UNIMPLEMENTED(); |
52 } | 112 } |
53 | 113 |
54 | 114 |
55 void Deoptimizer::RevertStackCheckCode(RelocInfo* rinfo, Code* check_code) { | 115 void Deoptimizer::RevertStackCheckCode(RelocInfo* rinfo, Code* check_code) { |
56 UNIMPLEMENTED(); | 116 UNIMPLEMENTED(); |
57 } | 117 } |
58 | 118 |
59 | 119 |
60 void Deoptimizer::DoComputeOsrOutputFrame() { | 120 void Deoptimizer::DoComputeOsrOutputFrame() { |
61 UNIMPLEMENTED(); | 121 UNIMPLEMENTED(); |
62 } | 122 } |
63 | 123 |
64 | 124 |
65 void Deoptimizer::DoComputeFrame(TranslationIterator* iterator, | 125 void Deoptimizer::DoComputeFrame(TranslationIterator* iterator, |
66 int frame_index) { | 126 int frame_index) { |
67 UNIMPLEMENTED(); | 127 // Read the ast node id, function, and frame height for this output frame. |
128 Translation::Opcode opcode = | |
129 static_cast<Translation::Opcode>(iterator->Next()); | |
130 USE(opcode); | |
131 ASSERT(Translation::FRAME == opcode); | |
132 int node_id = iterator->Next(); | |
133 JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next())); | |
134 unsigned height = iterator->Next(); | |
135 unsigned height_in_bytes = height * kPointerSize; | |
136 if (FLAG_trace_deopt) { | |
137 PrintF(" translating "); | |
138 function->PrintName(); | |
139 PrintF(" => node=%d, height=%d\n", node_id, height_in_bytes); | |
140 } | |
141 | |
142 // The 'fixed' part of the frame consists of the incoming parameters and | |
143 // the part described by JavaScriptFrameConstants. | |
144 unsigned fixed_frame_size = ComputeFixedSize(function); | |
145 unsigned input_frame_size = input_->GetFrameSize(); | |
146 unsigned output_frame_size = height_in_bytes + fixed_frame_size; | |
147 | |
148 // Allocate and store the output frame description. | |
149 FrameDescription* output_frame = | |
150 new(output_frame_size) FrameDescription(output_frame_size, function); | |
151 | |
152 bool is_bottommost = (0 == frame_index); | |
153 bool is_topmost = (output_count_ - 1 == frame_index); | |
154 ASSERT(frame_index >= 0 && frame_index < output_count_); | |
155 ASSERT(output_[frame_index] == NULL); | |
156 output_[frame_index] = output_frame; | |
157 | |
158 // The top address for the bottommost output frame can be computed from | |
159 // the input frame pointer and the output frame's height. For all | |
160 // subsequent output frames, it can be computed from the previous one's | |
161 // top address and the current frame's size. | |
162 intptr_t top_address; | |
163 if (is_bottommost) { | |
164 // 2 = context and function in the frame. | |
165 top_address = | |
166 input_->GetRegister(rbp.code()) - (2 * kPointerSize) - height_in_bytes; | |
167 } else { | |
168 top_address = output_[frame_index - 1]->GetTop() - output_frame_size; | |
169 } | |
170 output_frame->SetTop(top_address); | |
171 | |
172 // Compute the incoming parameter translation. | |
173 int parameter_count = function->shared()->formal_parameter_count() + 1; | |
174 unsigned output_offset = output_frame_size; | |
175 unsigned input_offset = input_frame_size; | |
176 for (int i = 0; i < parameter_count; ++i) { | |
177 output_offset -= kPointerSize; | |
178 DoTranslateCommand(iterator, frame_index, output_offset); | |
179 } | |
180 input_offset -= (parameter_count * kPointerSize); | |
181 | |
182 // There are no translation commands for the caller's pc and fp, the | |
183 // context, and the function. Synthesize their values and set them up | |
184 // explicitly. | |
185 // | |
186 // The caller's pc for the bottommost output frame is the same as in the | |
187 // input frame. For all subsequent output frames, it can be read from the | |
188 // previous one. This frame's pc can be computed from the non-optimized | |
189 // function code and AST id of the bailout. | |
190 output_offset -= kPointerSize; | |
191 input_offset -= kPointerSize; | |
192 intptr_t value; | |
193 if (is_bottommost) { | |
194 value = input_->GetFrameSlot(input_offset); | |
195 } else { | |
196 value = output_[frame_index - 1]->GetPc(); | |
197 } | |
198 output_frame->SetFrameSlot(output_offset, value); | |
199 if (FLAG_trace_deopt) { | |
200 PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" | |
201 V8PRIxPTR " ; caller's pc\n", | |
202 top_address + output_offset, output_offset, value); | |
203 } | |
204 | |
205 // The caller's frame pointer for the bottommost output frame is the same | |
206 // as in the input frame. For all subsequent output frames, it can be | |
207 // read from the previous one. Also compute and set this frame's frame | |
208 // pointer. | |
209 output_offset -= kPointerSize; | |
210 input_offset -= kPointerSize; | |
211 if (is_bottommost) { | |
212 value = input_->GetFrameSlot(input_offset); | |
213 } else { | |
214 value = output_[frame_index - 1]->GetFp(); | |
215 } | |
216 output_frame->SetFrameSlot(output_offset, value); | |
217 intptr_t fp_value = top_address + output_offset; | |
218 ASSERT(!is_bottommost || input_->GetRegister(rbp.code()) == fp_value); | |
219 output_frame->SetFp(fp_value); | |
220 if (is_topmost) output_frame->SetRegister(rbp.code(), fp_value); | |
221 if (FLAG_trace_deopt) { | |
222 PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" | |
223 V8PRIxPTR " ; caller's fp\n", | |
224 fp_value, output_offset, value); | |
225 } | |
226 | |
227 // The context can be gotten from the function so long as we don't | |
228 // optimize functions that need local contexts. | |
229 output_offset -= kPointerSize; | |
230 input_offset -= kPointerSize; | |
231 value = reinterpret_cast<intptr_t>(function->context()); | |
232 // The context for the bottommost output frame should also agree with the | |
233 // input frame. | |
234 ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value); | |
235 output_frame->SetFrameSlot(output_offset, value); | |
236 if (is_topmost) output_frame->SetRegister(rsi.code(), value); | |
237 if (FLAG_trace_deopt) { | |
238 PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" | |
239 V8PRIxPTR "; context\n", | |
240 top_address + output_offset, output_offset, value); | |
241 } | |
242 | |
243 // The function was mentioned explicitly in the BEGIN_FRAME. | |
244 output_offset -= kPointerSize; | |
245 input_offset -= kPointerSize; | |
246 value = reinterpret_cast<intptr_t>(function); | |
247 // The function for the bottommost output frame should also agree with the | |
248 // input frame. | |
249 ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value); | |
250 output_frame->SetFrameSlot(output_offset, value); | |
251 if (FLAG_trace_deopt) { | |
252 PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" | |
253 V8PRIxPTR "; function\n", | |
254 top_address + output_offset, output_offset, value); | |
255 } | |
256 | |
257 // Translate the rest of the frame. | |
258 for (unsigned i = 0; i < height; ++i) { | |
259 output_offset -= kPointerSize; | |
260 DoTranslateCommand(iterator, frame_index, output_offset); | |
261 } | |
262 ASSERT(0 == output_offset); | |
263 | |
264 // Compute this frame's PC, state, and continuation. | |
265 Code* non_optimized_code = function->shared()->code(); | |
266 FixedArray* raw_data = non_optimized_code->deoptimization_data(); | |
267 DeoptimizationOutputData* data = DeoptimizationOutputData::cast(raw_data); | |
268 Address start = non_optimized_code->instruction_start(); | |
269 unsigned pc_and_state = GetOutputInfo(data, node_id, function->shared()); | |
270 unsigned pc_offset = FullCodeGenerator::PcField::decode(pc_and_state); | |
271 intptr_t pc_value = reinterpret_cast<intptr_t>(start + pc_offset); | |
272 output_frame->SetPc(pc_value); | |
273 | |
274 FullCodeGenerator::State state = | |
275 FullCodeGenerator::StateField::decode(pc_and_state); | |
276 output_frame->SetState(Smi::FromInt(state)); | |
277 | |
278 // Set the continuation for the topmost frame. | |
279 if (is_topmost) { | |
280 Code* continuation = (bailout_type_ == EAGER) | |
281 ? Builtins::builtin(Builtins::NotifyDeoptimized) | |
282 : Builtins::builtin(Builtins::NotifyLazyDeoptimized); | |
283 output_frame->SetContinuation( | |
284 reinterpret_cast<intptr_t>(continuation->entry())); | |
285 } | |
286 | |
287 if (output_count_ - 1 == frame_index) iterator->Done(); | |
68 } | 288 } |
69 | 289 |
70 | 290 |
291 #define __ masm()-> | |
292 | |
71 void Deoptimizer::EntryGenerator::Generate() { | 293 void Deoptimizer::EntryGenerator::Generate() { |
72 // UNIMPLEMENTED, for now just return. | 294 GeneratePrologue(); |
73 return; | 295 CpuFeatures::Scope scope(SSE2); |
296 | |
297 // Save all general purpose registers before messing with them. | |
298 const int kNumberOfRegisters = Register::kNumRegisters; | |
299 | |
300 const int kDoubleRegsSize = kDoubleSize * | |
301 XMMRegister::kNumAllocatableRegisters; | |
302 __ subq(rsp, Immediate(kDoubleRegsSize)); | |
303 | |
304 for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) { | |
305 XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i); | |
306 int offset = i * kDoubleSize; | |
307 __ movsd(Operand(rsp, offset), xmm_reg); | |
308 } | |
309 | |
310 // We push all registers onto the stack, even though we do not need | |
311 // to restore all later. | |
312 for (int i = 0; i < kNumberOfRegisters; i++) { | |
313 Register r = Register::toRegister(i); | |
314 __ push(r); | |
315 } | |
316 | |
317 const int kSavedRegistersAreaSize = kNumberOfRegisters * kPointerSize + | |
318 kDoubleRegsSize; | |
319 | |
320 // When calling new_deoptimizer_function we need to pass the last argument | |
321 // on the stack on windows and in r8 on linux. The remaining arguments are | 
Kevin Millikin (Chromium)
2011/01/24 13:52:21
Comment mentions "on windows" and "on windows".
Rico
2011/01/25 07:48:16
Done.
| |
322 // all passed in registers (different ones on linux and windows though). | |
323 | |
324 #ifdef _WIN64 | |
325 Register arg4 = r9; | |
326 Register arg3 = r8; | |
327 Register arg2 = rdx; | |
328 Register arg1 = rcx; | |
329 #else | |
330 Register arg4 = rcx; | |
331 Register arg3 = rdx; | |
332 Register arg2 = rsi; | |
333 Register arg1 = rdi; | |
334 #endif | |
335 | |
336 // We use this to keep the value of the fifth argument temporarily. | |
337 // Unfortunately we can't store it directly in r8 (used for passing | |
338 // this on linux), since it is another parameter passing register on windows. | |
339 Register arg5 = r11; | |
340 | |
341 // Get the bailout id from the stack. | |
342 __ movq(arg3, Operand(rsp, kSavedRegistersAreaSize)); | |
343 | |
344 // Get the address of the location in the code object if possible | |
345 // and compute the fp-to-sp delta in register arg5. | |
346 if (type() == EAGER) { | |
347 __ Set(arg4, 0); | |
348 __ lea(arg5, Operand(rsp, kSavedRegistersAreaSize + 1 * kPointerSize)); | |
349 } else { | |
350 __ movq(arg4, Operand(rsp, kSavedRegistersAreaSize + 1 * kPointerSize)); | |
351 __ lea(arg5, Operand(rsp, kSavedRegistersAreaSize + 2 * kPointerSize)); | |
352 } | |
353 | |
354 __ subq(arg5, rbp); | |
355 __ neg(arg5); | |
356 | |
357 // Allocate a new deoptimizer object. | |
358 __ PrepareCallCFunction(5); | |
359 __ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset)); | |
360 __ movq(arg1, rax); | |
361 __ movq(arg2, Immediate(type())); | |
362 // Args 3 and 4 are already in the right registers. | |
363 | |
364 // On windows put the argument on the stack (PrepareCallCFunction has | 
365 // created space for this). On linux pass the argument in r8. | 
366 #ifdef _WIN64 | |
367 __ movq(Operand(rsp, 0 * kPointerSize), arg5); | |
368 #else | |
369 __ movq(r8, arg5); | |
370 #endif | |
371 | |
372 __ CallCFunction(ExternalReference::new_deoptimizer_function(), 5); | |
373 // Preserve deoptimizer object in register rax and get the input | |
Lasse Reichstein
2011/01/24 13:24:21
Indentation.
Rico
2011/01/24 13:40:14
Done.
| |
374 // frame descriptor pointer. | |
375 __ movq(rbx, Operand(rax, Deoptimizer::input_offset())); | |
376 | |
377 // Fill in the input registers. | |
378 for (int i = 0; i < kNumberOfRegisters; i++) { | |
379 int offset = (i * kPointerSize) + FrameDescription::registers_offset(); | |
380 __ movq(rcx, Operand(rsp, (kNumberOfRegisters - 1 - i) * kPointerSize)); | |
Kevin Millikin (Chromium)
2011/01/24 13:52:21
I wonder why we don't count i down from kNumberOfR
Rico
2011/01/25 07:48:16
Done.
| |
381 __ movq(Operand(rbx, offset), rcx); | |
382 } | |
383 | |
384 // Fill in the double input registers. | |
385 int double_regs_offset = FrameDescription::double_registers_offset(); | |
386 for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) { | |
387 int dst_offset = i * kDoubleSize + double_regs_offset; | |
388 int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize; | |
Lasse Reichstein
2011/01/24 13:24:21
If the values are at the end of the stack, you cou
Rico
2011/01/24 13:40:14
They are not at the end, the general purpose regis
Kevin Millikin (Chromium)
2011/01/24 13:52:21
Agree with lrn.
Rico
2011/01/25 07:48:16
Done.
| |
389 __ movsd(xmm0, Operand(rsp, src_offset)); | |
390 __ movsd(Operand(rbx, dst_offset), xmm0); | |
391 } | |
392 | |
393 // Remove the bailout id and the general purpose registers from the stack. | |
394 if (type() == EAGER) { | |
395 __ addq(rsp, Immediate(kSavedRegistersAreaSize + kPointerSize)); | |
396 } else { | |
397 __ addq(rsp, Immediate(kSavedRegistersAreaSize + 2 * kPointerSize)); | |
398 } | |
399 | |
400 // Compute a pointer to the unwinding limit in register rcx; that is | 
401 // the first stack slot not part of the input frame. | |
402 __ movq(rcx, Operand(rbx, FrameDescription::frame_size_offset())); | |
403 __ addq(rcx, rsp); | |
404 | |
405 // Unwind the stack down to - but not including - the unwinding | |
406 // limit and copy the contents of the activation frame to the input | |
407 // frame description. | |
408 __ lea(rdx, Operand(rbx, FrameDescription::frame_content_offset())); | |
409 Label pop_loop; | |
410 __ bind(&pop_loop); | |
411 __ pop(Operand(rdx, 0)); | |
412 __ addq(rdx, Immediate(sizeof(intptr_t))); | |
413 __ cmpq(rcx, rsp); | |
414 __ j(not_equal, &pop_loop); | |
415 | |
416 // Compute the output frame in the deoptimizer. | |
417 __ push(rax); | |
418 __ PrepareCallCFunction(1); | |
419 __ movq(arg1, rax); | |
420 __ CallCFunction(ExternalReference::compute_output_frames_function(), 1); | |
421 __ pop(rax); | |
422 | |
423 // Replace the current frame with the output frames. | |
424 Label outer_push_loop, inner_push_loop; | |
425 // Outer loop state: rax = current FrameDescription**, rdx = one past the | |
426 // last FrameDescription**. | |
427 __ movl(rdx, Operand(rax, Deoptimizer::output_count_offset())); | |
428 __ movq(rax, Operand(rax, Deoptimizer::output_offset())); | |
429 __ lea(rdx, Operand(rax, rdx, times_8, 0)); | |
430 __ bind(&outer_push_loop); | |
431 // Inner loop state: rbx = current FrameDescription*, rcx = loop index. | |
432 __ movq(rbx, Operand(rax, 0)); | |
433 __ movq(rcx, Operand(rbx, FrameDescription::frame_size_offset())); | |
434 __ bind(&inner_push_loop); | |
435 __ subq(rcx, Immediate(sizeof(intptr_t))); | |
436 __ push(Operand(rbx, rcx, times_1, FrameDescription::frame_content_offset())); | |
437 __ testq(rcx, rcx); | |
438 __ j(not_zero, &inner_push_loop); | |
439 __ addq(rax, Immediate(kPointerSize)); | |
440 __ cmpq(rax, rdx); | |
441 __ j(below, &outer_push_loop); | |
442 | |
443 // In case of OSR, we have to restore the XMM registers. | |
444 if (type() == OSR) { | |
445 for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) { | |
446 XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i); | |
447 int src_offset = i * kDoubleSize + double_regs_offset; | |
448 __ movsd(xmm_reg, Operand(rbx, src_offset)); | |
449 } | |
450 } | |
451 | |
452 // Push state, pc, and continuation from the last output frame. | |
453 if (type() != OSR) { | |
454 __ push(Operand(rbx, FrameDescription::state_offset())); | |
455 } | |
456 __ push(Operand(rbx, FrameDescription::pc_offset())); | |
457 __ push(Operand(rbx, FrameDescription::continuation_offset())); | |
458 | |
459 // Push the registers from the last output frame. | |
460 for (int i = 0; i < kNumberOfRegisters; i++) { | |
461 int offset = (i * kPointerSize) + FrameDescription::registers_offset(); | |
462 __ push(Operand(rbx, offset)); | |
463 } | |
464 | |
465 // Restore the registers from the stack. | |
466 for (int i = kNumberOfRegisters - 1; i >= 0 ; i--) { | |
467 Register r = Register::toRegister(i); | |
468 // Do not restore rsp, simply pop the value into the next register | |
469 // and overwrite this immediately. | |
470 if (r.is(rsp)) { | |
471 ASSERT(i > 0); | |
Lasse Reichstein
2011/01/24 13:24:21
Why not just
if (r.is(rsp)) {
ASSERT(i > 0);
Rico
2011/01/24 13:40:14
Done.
| |
472 i--; | |
473 r = Register::toRegister(i); | |
474 __ pop(r); | |
475 } | |
476 __ pop(r); | |
477 } | |
478 | |
479 // Set up the roots register. | |
480 ExternalReference roots_address = ExternalReference::roots_address(); | |
481 __ movq(r13, roots_address); | |
482 | |
483 __ movq(kSmiConstantRegister, | |
484 reinterpret_cast<uint64_t>(Smi::FromInt(kSmiConstantRegisterValue)), | |
Lasse Reichstein
2011/01/24 13:24:21
Indentation.
Rico
2011/01/24 13:40:14
Done.
| |
485 RelocInfo::NONE); | |
486 | |
487 // Return to the continuation point. | |
488 __ ret(0); | |
74 } | 489 } |
75 | 490 |
76 | 491 |
77 void Deoptimizer::TableEntryGenerator::GeneratePrologue() { | 492 void Deoptimizer::TableEntryGenerator::GeneratePrologue() { |
78 UNIMPLEMENTED(); | 493 // Create a sequence of deoptimization entries. |
494 Label done; | |
495 for (int i = 0; i < count(); i++) { | |
496 int start = masm()->pc_offset(); | |
497 USE(start); | |
498 __ push_imm32(i); | |
499 __ jmp(&done); | |
500 ASSERT(masm()->pc_offset() - start == table_entry_size_); | |
501 } | |
502 __ bind(&done); | |
79 } | 503 } |
80 | 504 |
505 #undef __ | |
506 | |
507 | |
81 } } // namespace v8::internal | 508 } } // namespace v8::internal |
82 | 509 |
83 #endif // V8_TARGET_ARCH_X64 | 510 #endif // V8_TARGET_ARCH_X64 |
OLD | NEW |