OLD | NEW |
1 // Copyright 2010 the V8 project authors. All rights reserved. | 1 // Copyright 2010 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 23 matching lines...) Expand all Loading... |
34 #include "full-codegen.h" | 34 #include "full-codegen.h" |
35 #include "safepoint-table.h" | 35 #include "safepoint-table.h" |
36 | 36 |
37 namespace v8 { | 37 namespace v8 { |
38 namespace internal { | 38 namespace internal { |
39 | 39 |
40 | 40 |
// Size in bytes of one deoptimization-table entry emitted by
// TableEntryGenerator::GeneratePrologue (a push_imm32 followed by a jmp).
int Deoptimizer::table_entry_size_ = 10;
42 | 42 |
43 void Deoptimizer::DeoptimizeFunction(JSFunction* function) { | 43 void Deoptimizer::DeoptimizeFunction(JSFunction* function) { |
44 // UNIMPLEMENTED, for now just return. | 44 AssertNoAllocation no_allocation; |
45 return; | 45 |
| 46 if (!function->IsOptimized()) return; |
| 47 |
| 48 // Get the optimized code. |
| 49 Code* code = function->code(); |
| 50 |
| 51 // Invalidate the relocation information, as it will become invalid by the |
| 52 // code patching below, and is not needed any more. |
| 53 code->InvalidateRelocation(); |
| 54 |
| 55 // For each return after a safepoint insert a absolute call to the |
| 56 // corresponding deoptimization entry. |
| 57 unsigned last_pc_offset = 0; |
| 58 SafepointTable table(function->code()); |
| 59 for (unsigned i = 0; i < table.length(); i++) { |
| 60 unsigned pc_offset = table.GetPcOffset(i); |
| 61 SafepointEntry safepoint_entry = table.GetEntry(i); |
| 62 int deoptimization_index = safepoint_entry.deoptimization_index(); |
| 63 int gap_code_size = safepoint_entry.gap_code_size(); |
| 64 #ifdef DEBUG |
| 65 // Destroy the code which is not supposed to run again. |
| 66 unsigned instructions = pc_offset - last_pc_offset; |
| 67 CodePatcher destroyer(code->instruction_start() + last_pc_offset, |
| 68 instructions); |
| 69 for (unsigned i = 0; i < instructions; i++) { |
| 70 destroyer.masm()->int3(); |
| 71 } |
| 72 #endif |
| 73 last_pc_offset = pc_offset; |
| 74 if (deoptimization_index != Safepoint::kNoDeoptimizationIndex) { |
| 75 CodePatcher patcher( |
| 76 code->instruction_start() + pc_offset + gap_code_size, |
| 77 Assembler::kCallInstructionLength); |
| 78 patcher.masm()->Call(GetDeoptimizationEntry(deoptimization_index, LAZY), |
| 79 RelocInfo::NONE); |
| 80 last_pc_offset += gap_code_size + Assembler::kCallInstructionLength; |
| 81 } |
| 82 } |
| 83 #ifdef DEBUG |
| 84 // Destroy the code which is not supposed to run again. |
| 85 unsigned instructions = code->safepoint_table_start() - last_pc_offset; |
| 86 CodePatcher destroyer(code->instruction_start() + last_pc_offset, |
| 87 instructions); |
| 88 for (unsigned i = 0; i < instructions; i++) { |
| 89 destroyer.masm()->int3(); |
| 90 } |
| 91 #endif |
| 92 |
| 93 // Add the deoptimizing code to the list. |
| 94 DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code); |
| 95 node->set_next(deoptimizing_code_list_); |
| 96 deoptimizing_code_list_ = node; |
| 97 |
| 98 // Set the code for the function to non-optimized version. |
| 99 function->ReplaceCode(function->shared()->code()); |
| 100 |
| 101 if (FLAG_trace_deopt) { |
| 102 PrintF("[forced deoptimization: "); |
| 103 function->PrintName(); |
| 104 PrintF(" / %" V8PRIxPTR "]\n", reinterpret_cast<intptr_t>(function)); |
| 105 } |
46 } | 106 } |
47 | 107 |
48 | 108 |
// Replaces the stack-check call site described by |rinfo| with a call to
// |replacement_code|. Not yet implemented on x64.
void Deoptimizer::PatchStackCheckCode(RelocInfo* rinfo,
                                      Code* replacement_code) {
  UNIMPLEMENTED();
}
53 | 113 |
54 | 114 |
// Restores the original stack-check call |check_code| at the site described
// by |rinfo|. Not yet implemented on x64.
void Deoptimizer::RevertStackCheckCode(RelocInfo* rinfo, Code* check_code) {
  UNIMPLEMENTED();
}
58 | 118 |
59 | 119 |
// Builds the output frame for on-stack replacement. Not yet implemented
// on x64.
void Deoptimizer::DoComputeOsrOutputFrame() {
  UNIMPLEMENTED();
}
63 | 123 |
64 | 124 |
// Materializes output frame |frame_index| from the translation stream:
// reads the FRAME record (ast node id, function, height), allocates a
// FrameDescription, and fills it slot by slot from top to bottom.
// output_offset and input_offset are decremented in lockstep; their order
// of updates defines the frame layout and must not be changed.
void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
                                 int frame_index) {
  // Read the ast node id, function, and frame height for this output frame.
  Translation::Opcode opcode =
      static_cast<Translation::Opcode>(iterator->Next());
  USE(opcode);  // Only consumed by the ASSERT below in debug builds.
  ASSERT(Translation::FRAME == opcode);
  int node_id = iterator->Next();
  JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
  unsigned height = iterator->Next();
  unsigned height_in_bytes = height * kPointerSize;
  if (FLAG_trace_deopt) {
    PrintF(" translating ");
    function->PrintName();
    PrintF(" => node=%d, height=%d\n", node_id, height_in_bytes);
  }

  // The 'fixed' part of the frame consists of the incoming parameters and
  // the part described by JavaScriptFrameConstants.
  unsigned fixed_frame_size = ComputeFixedSize(function);
  unsigned input_frame_size = input_->GetFrameSize();
  unsigned output_frame_size = height_in_bytes + fixed_frame_size;

  // Allocate and store the output frame description.
  FrameDescription* output_frame =
      new(output_frame_size) FrameDescription(output_frame_size, function);

  bool is_bottommost = (0 == frame_index);
  bool is_topmost = (output_count_ - 1 == frame_index);
  ASSERT(frame_index >= 0 && frame_index < output_count_);
  ASSERT(output_[frame_index] == NULL);
  output_[frame_index] = output_frame;

  // The top address for the bottommost output frame can be computed from
  // the input frame pointer and the output frame's height. For all
  // subsequent output frames, it can be computed from the previous one's
  // top address and the current frame's size.
  intptr_t top_address;
  if (is_bottommost) {
    // 2 = context and function in the frame.
    top_address =
        input_->GetRegister(rbp.code()) - (2 * kPointerSize) - height_in_bytes;
  } else {
    top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
  }
  output_frame->SetTop(top_address);

  // Compute the incoming parameter translation.
  // +1 accounts for the receiver in addition to the formal parameters.
  int parameter_count = function->shared()->formal_parameter_count() + 1;
  unsigned output_offset = output_frame_size;
  unsigned input_offset = input_frame_size;
  for (int i = 0; i < parameter_count; ++i) {
    output_offset -= kPointerSize;
    DoTranslateCommand(iterator, frame_index, output_offset);
  }
  input_offset -= (parameter_count * kPointerSize);

  // There are no translation commands for the caller's pc and fp, the
  // context, and the function. Synthesize their values and set them up
  // explicitly.
  //
  // The caller's pc for the bottommost output frame is the same as in the
  // input frame. For all subsequent output frames, it can be read from the
  // previous one. This frame's pc can be computed from the non-optimized
  // function code and AST id of the bailout.
  output_offset -= kPointerSize;
  input_offset -= kPointerSize;
  intptr_t value;
  if (is_bottommost) {
    value = input_->GetFrameSlot(input_offset);
  } else {
    value = output_[frame_index - 1]->GetPc();
  }
  output_frame->SetFrameSlot(output_offset, value);
  if (FLAG_trace_deopt) {
    PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
           V8PRIxPTR " ; caller's pc\n",
           top_address + output_offset, output_offset, value);
  }

  // The caller's frame pointer for the bottommost output frame is the same
  // as in the input frame. For all subsequent output frames, it can be
  // read from the previous one. Also compute and set this frame's frame
  // pointer.
  output_offset -= kPointerSize;
  input_offset -= kPointerSize;
  if (is_bottommost) {
    value = input_->GetFrameSlot(input_offset);
  } else {
    value = output_[frame_index - 1]->GetFp();
  }
  output_frame->SetFrameSlot(output_offset, value);
  intptr_t fp_value = top_address + output_offset;
  ASSERT(!is_bottommost || input_->GetRegister(rbp.code()) == fp_value);
  output_frame->SetFp(fp_value);
  // The topmost frame's fp also goes into the rbp register snapshot.
  if (is_topmost) output_frame->SetRegister(rbp.code(), fp_value);
  if (FLAG_trace_deopt) {
    PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
           V8PRIxPTR " ; caller's fp\n",
           fp_value, output_offset, value);
  }

  // The context can be gotten from the function so long as we don't
  // optimize functions that need local contexts.
  output_offset -= kPointerSize;
  input_offset -= kPointerSize;
  value = reinterpret_cast<intptr_t>(function->context());
  // The context for the bottommost output frame should also agree with the
  // input frame.
  ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value);
  output_frame->SetFrameSlot(output_offset, value);
  // rsi holds the context register on x64.
  if (is_topmost) output_frame->SetRegister(rsi.code(), value);
  if (FLAG_trace_deopt) {
    PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
           V8PRIxPTR "; context\n",
           top_address + output_offset, output_offset, value);
  }

  // The function was mentioned explicitly in the BEGIN_FRAME.
  output_offset -= kPointerSize;
  input_offset -= kPointerSize;
  value = reinterpret_cast<intptr_t>(function);
  // The function for the bottommost output frame should also agree with the
  // input frame.
  ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value);
  output_frame->SetFrameSlot(output_offset, value);
  if (FLAG_trace_deopt) {
    PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
           V8PRIxPTR "; function\n",
           top_address + output_offset, output_offset, value);
  }

  // Translate the rest of the frame.
  for (unsigned i = 0; i < height; ++i) {
    output_offset -= kPointerSize;
    DoTranslateCommand(iterator, frame_index, output_offset);
  }
  // Every slot of the output frame must have been written exactly once.
  ASSERT(0 == output_offset);

  // Compute this frame's PC, state, and continuation.
  Code* non_optimized_code = function->shared()->code();
  FixedArray* raw_data = non_optimized_code->deoptimization_data();
  DeoptimizationOutputData* data = DeoptimizationOutputData::cast(raw_data);
  Address start = non_optimized_code->instruction_start();
  unsigned pc_and_state = GetOutputInfo(data, node_id, function->shared());
  unsigned pc_offset = FullCodeGenerator::PcField::decode(pc_and_state);
  intptr_t pc_value = reinterpret_cast<intptr_t>(start + pc_offset);
  output_frame->SetPc(pc_value);

  FullCodeGenerator::State state =
      FullCodeGenerator::StateField::decode(pc_and_state);
  output_frame->SetState(Smi::FromInt(state));

  // Set the continuation for the topmost frame.
  if (is_topmost) {
    Code* continuation = (bailout_type_ == EAGER)
        ? Builtins::builtin(Builtins::NotifyDeoptimized)
        : Builtins::builtin(Builtins::NotifyLazyDeoptimized);
    output_frame->SetContinuation(
        reinterpret_cast<intptr_t>(continuation->entry()));
  }

  // Same condition as is_topmost above: the last frame exhausts the
  // translation stream.
  if (output_count_ - 1 == frame_index) iterator->Done();
}
69 | 289 |
70 | 290 |
| 291 #define __ masm()-> |
| 292 |
// Emits the common deoptimization entry stub: spills all general-purpose
// and allocatable XMM registers into a FrameDescription, calls the C++
// Deoptimizer to compute the output frames, then rebuilds the stack from
// those frames and returns into the continuation.
void Deoptimizer::EntryGenerator::Generate() {
  GeneratePrologue();
  CpuFeatures::Scope scope(SSE2);

  // Save all general purpose registers before messing with them.
  const int kNumberOfRegisters = Register::kNumRegisters;

  const int kDoubleRegsSize = kDoubleSize *
      XMMRegister::kNumAllocatableRegisters;
  __ subq(rsp, Immediate(kDoubleRegsSize));

  // Spill the allocatable XMM registers into the reserved stack area.
  for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
    XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
    int offset = i * kDoubleSize;
    __ movsd(Operand(rsp, offset), xmm_reg);
  }

  // We push all registers onto the stack, even though we do not need
  // to restore all later.
  for (int i = 0; i < kNumberOfRegisters; i++) {
    Register r = Register::toRegister(i);
    __ push(r);
  }

  const int kSavedRegistersAreaSize = kNumberOfRegisters * kPointerSize +
      kDoubleRegsSize;

  // When calling new_deoptimizer_function we need to pass the last argument
  // on the stack on windows and in r8 on linux. The remaining arguments are
  // all passed in registers (different ones on linux and windows though).

#ifdef _WIN64
  Register arg4 = r9;
  Register arg3 = r8;
  Register arg2 = rdx;
  Register arg1 = rcx;
#else
  Register arg4 = rcx;
  Register arg3 = rdx;
  Register arg2 = rsi;
  Register arg1 = rdi;
#endif

  // We use this to keep the value of the fifth argument temporarily.
  // Unfortunately we can't store it directly in r8 (used for passing
  // this on linux), since it is another parameter passing register on windows.
  Register arg5 = r11;

  // Get the bailout id from the stack.
  __ movq(arg3, Operand(rsp, kSavedRegistersAreaSize));

  // Get the address of the location in the code object if possible
  // and compute the fp-to-sp delta in register arg5.
  if (type() == EAGER) {
    __ Set(arg4, 0);
    __ lea(arg5, Operand(rsp, kSavedRegistersAreaSize + 1 * kPointerSize));
  } else {
    __ movq(arg4, Operand(rsp, kSavedRegistersAreaSize + 1 * kPointerSize));
    __ lea(arg5, Operand(rsp, kSavedRegistersAreaSize + 2 * kPointerSize));
  }

  // arg5 = rbp - arg5, i.e. the fp-to-sp delta.
  __ subq(arg5, rbp);
  __ neg(arg5);

  // Allocate a new deoptimizer object.
  __ PrepareCallCFunction(5);
  __ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
  __ movq(arg1, rax);
  __ movq(arg2, Immediate(type()));
  // Args 3 and 4 are already in the right registers.

  // On windows put the argument on the stack (PrepareCallCFunction has
  // created space for this). On linux pass the argument in r8.
#ifdef _WIN64
  __ movq(Operand(rsp, 0 * kPointerSize), arg5);
#else
  __ movq(r8, arg5);
#endif

  __ CallCFunction(ExternalReference::new_deoptimizer_function(), 5);
  // Preserve deoptimizer object in register rax and get the input
  // frame descriptor pointer.
  __ movq(rbx, Operand(rax, Deoptimizer::input_offset()));

  // Fill in the input registers by popping the pushed GP registers
  // (pushed in ascending index order, so popped in descending order).
  for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
    int offset = (i * kPointerSize) + FrameDescription::registers_offset();
    __ pop(Operand(rbx, offset));
  }

  // Fill in the double input registers.
  int double_regs_offset = FrameDescription::double_registers_offset();
  for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; i++) {
    int dst_offset = i * kDoubleSize + double_regs_offset;
    __ pop(Operand(rbx, dst_offset));
  }

  // Remove the bailout id and the general purpose registers from the stack.
  if (type() == EAGER) {
    __ addq(rsp, Immediate(kPointerSize));
  } else {
    // Lazy deopt also has the return-address slot to discard.
    __ addq(rsp, Immediate(2 * kPointerSize));
  }

  // Compute a pointer to the unwinding limit in register rcx; that is
  // the first stack slot not part of the input frame.
  __ movq(rcx, Operand(rbx, FrameDescription::frame_size_offset()));
  __ addq(rcx, rsp);

  // Unwind the stack down to - but not including - the unwinding
  // limit and copy the contents of the activation frame to the input
  // frame description.
  __ lea(rdx, Operand(rbx, FrameDescription::frame_content_offset()));
  Label pop_loop;
  __ bind(&pop_loop);
  __ pop(Operand(rdx, 0));
  __ addq(rdx, Immediate(sizeof(intptr_t)));
  __ cmpq(rcx, rsp);
  __ j(not_equal, &pop_loop);

  // Compute the output frame in the deoptimizer.
  __ push(rax);  // Preserve deoptimizer object across the C call.
  __ PrepareCallCFunction(1);
  __ movq(arg1, rax);
  __ CallCFunction(ExternalReference::compute_output_frames_function(), 1);
  __ pop(rax);

  // Replace the current frame with the output frames.
  Label outer_push_loop, inner_push_loop;
  // Outer loop state: rax = current FrameDescription**, rdx = one past the
  // last FrameDescription**.
  __ movl(rdx, Operand(rax, Deoptimizer::output_count_offset()));
  __ movq(rax, Operand(rax, Deoptimizer::output_offset()));
  __ lea(rdx, Operand(rax, rdx, times_8, 0));
  __ bind(&outer_push_loop);
  // Inner loop state: rbx = current FrameDescription*, rcx = loop index.
  __ movq(rbx, Operand(rax, 0));
  __ movq(rcx, Operand(rbx, FrameDescription::frame_size_offset()));
  __ bind(&inner_push_loop);
  // Push frame contents from high offsets down so the frame ends up in
  // the right order on the stack.
  __ subq(rcx, Immediate(sizeof(intptr_t)));
  __ push(Operand(rbx, rcx, times_1, FrameDescription::frame_content_offset()));
  __ testq(rcx, rcx);
  __ j(not_zero, &inner_push_loop);
  __ addq(rax, Immediate(kPointerSize));
  __ cmpq(rax, rdx);
  __ j(below, &outer_push_loop);

  // In case of OSR, we have to restore the XMM registers.
  if (type() == OSR) {
    for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
      XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
      int src_offset = i * kDoubleSize + double_regs_offset;
      __ movsd(xmm_reg, Operand(rbx, src_offset));
    }
  }

  // Push state, pc, and continuation from the last output frame.
  if (type() != OSR) {
    __ push(Operand(rbx, FrameDescription::state_offset()));
  }
  __ push(Operand(rbx, FrameDescription::pc_offset()));
  __ push(Operand(rbx, FrameDescription::continuation_offset()));

  // Push the registers from the last output frame.
  for (int i = 0; i < kNumberOfRegisters; i++) {
    int offset = (i * kPointerSize) + FrameDescription::registers_offset();
    __ push(Operand(rbx, offset));
  }

  // Restore the registers from the stack.
  for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
    Register r = Register::toRegister(i);
    // Do not restore rsp, simply pop the value into the next register
    // and overwrite this afterwards.
    if (r.is(rsp)) {
      ASSERT(i > 0);
      r = Register::toRegister(i - 1);
    }
    __ pop(r);
  }

  // Set up the roots register.
  ExternalReference roots_address = ExternalReference::roots_address();
  __ movq(r13, roots_address);

  // Reload the smi constant register clobbered above.
  __ movq(kSmiConstantRegister,
          reinterpret_cast<uint64_t>(Smi::FromInt(kSmiConstantRegisterValue)),
          RelocInfo::NONE);

  // Return to the continuation point.
  __ ret(0);
}
75 | 487 |
76 | 488 |
77 void Deoptimizer::TableEntryGenerator::GeneratePrologue() { | 489 void Deoptimizer::TableEntryGenerator::GeneratePrologue() { |
78 UNIMPLEMENTED(); | 490 // Create a sequence of deoptimization entries. |
| 491 Label done; |
| 492 for (int i = 0; i < count(); i++) { |
| 493 int start = masm()->pc_offset(); |
| 494 USE(start); |
| 495 __ push_imm32(i); |
| 496 __ jmp(&done); |
| 497 ASSERT(masm()->pc_offset() - start == table_entry_size_); |
| 498 } |
| 499 __ bind(&done); |
79 } | 500 } |
80 | 501 |
| 502 #undef __ |
| 503 |
| 504 |
81 } } // namespace v8::internal | 505 } } // namespace v8::internal |
82 | 506 |
83 #endif // V8_TARGET_ARCH_X64 | 507 #endif // V8_TARGET_ARCH_X64 |
OLD | NEW |