| OLD | NEW |
| 1 // Copyright 2010 the V8 project authors. All rights reserved. | 1 // Copyright 2010 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 23 matching lines...) Expand all Loading... |
| 34 #include "full-codegen.h" | 34 #include "full-codegen.h" |
| 35 #include "safepoint-table.h" | 35 #include "safepoint-table.h" |
| 36 | 36 |
| 37 namespace v8 { | 37 namespace v8 { |
| 38 namespace internal { | 38 namespace internal { |
| 39 | 39 |
| 40 | 40 |
| 41 int Deoptimizer::table_entry_size_ = 10; | 41 int Deoptimizer::table_entry_size_ = 10; |
| 42 | 42 |
| 43 void Deoptimizer::DeoptimizeFunction(JSFunction* function) { | 43 void Deoptimizer::DeoptimizeFunction(JSFunction* function) { |
| 44 UNIMPLEMENTED(); | 44 AssertNoAllocation no_allocation; |
| 45 |
| 46 if (!function->IsOptimized()) return; |
| 47 |
| 48 // Get the optimized code. |
| 49 Code* code = function->code(); |
| 50 |
| 51 // Invalidate the relocation information, as it will become invalid by the |
| 52 // code patching below, and is not needed any more. |
| 53 code->InvalidateRelocation(); |
| 54 |
| 55 // For each return after a safepoint insert an absolute call to the |
| 56 // corresponding deoptimization entry. |
| 57 unsigned last_pc_offset = 0; |
| 58 SafepointTable table(function->code()); |
| 59 for (unsigned i = 0; i < table.length(); i++) { |
| 60 unsigned pc_offset = table.GetPcOffset(i); |
| 61 SafepointEntry safepoint_entry = table.GetEntry(i); |
| 62 int deoptimization_index = safepoint_entry.deoptimization_index(); |
| 63 int gap_code_size = safepoint_entry.gap_code_size(); |
| 64 #ifdef DEBUG |
| 65 // Destroy the code which is not supposed to run again. |
| 66 unsigned instructions = pc_offset - last_pc_offset; |
| 67 CodePatcher destroyer(code->instruction_start() + last_pc_offset, |
| 68 instructions); |
| 69 for (unsigned i = 0; i < instructions; i++) { |
| 70 destroyer.masm()->int3(); |
| 71 } |
| 72 #endif |
| 73 last_pc_offset = pc_offset; |
| 74 if (deoptimization_index != Safepoint::kNoDeoptimizationIndex) { |
| 75 CodePatcher patcher( |
| 76 code->instruction_start() + pc_offset + gap_code_size, |
| 77 Assembler::kCallInstructionLength); |
| 78 patcher.masm()->Call(GetDeoptimizationEntry(deoptimization_index, LAZY), |
| 79 RelocInfo::NONE); |
| 80 last_pc_offset += gap_code_size + Assembler::kCallInstructionLength; |
| 81 } |
| 82 } |
| 83 #ifdef DEBUG |
| 84 // Destroy the code which is not supposed to run again. |
| 85 unsigned instructions = code->safepoint_table_start() - last_pc_offset; |
| 86 CodePatcher destroyer(code->instruction_start() + last_pc_offset, |
| 87 instructions); |
| 88 for (unsigned i = 0; i < instructions; i++) { |
| 89 destroyer.masm()->int3(); |
| 90 } |
| 91 #endif |
| 92 |
| 93 // Add the deoptimizing code to the list. |
| 94 DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code); |
| 95 DeoptimizerData* data = Isolate::Current()->deoptimizer_data(); |
| 96 node->set_next(data->deoptimizing_code_list_); |
| 97 data->deoptimizing_code_list_ = node; |
| 98 |
| 99 // Set the code for the function to non-optimized version. |
| 100 function->ReplaceCode(function->shared()->code()); |
| 101 |
| 102 if (FLAG_trace_deopt) { |
| 103 PrintF("[forced deoptimization: "); |
| 104 function->PrintName(); |
| 105 PrintF(" / %" V8PRIxPTR "]\n", reinterpret_cast<intptr_t>(function)); |
| 106 } |
| 45 } | 107 } |
| 46 | 108 |
| 47 | 109 |
| 48 void Deoptimizer::PatchStackCheckCode(RelocInfo* rinfo, | 110 void Deoptimizer::PatchStackCheckCode(Code* unoptimized_code, |
| 111 Code* check_code, |
| 49 Code* replacement_code) { | 112 Code* replacement_code) { |
| 50 UNIMPLEMENTED(); | 113 UNIMPLEMENTED(); |
| 51 } | 114 } |
| 52 | 115 |
| 53 | 116 |
| 54 void Deoptimizer::RevertStackCheckCode(RelocInfo* rinfo, Code* check_code) { | 117 void Deoptimizer::RevertStackCheckCode(Code* unoptimized_code, |
| 118 Code* check_code, |
| 119 Code* replacement_code) { |
| 55 UNIMPLEMENTED(); | 120 UNIMPLEMENTED(); |
| 56 } | 121 } |
| 57 | 122 |
| 58 | 123 |
| 59 void Deoptimizer::DoComputeOsrOutputFrame() { | 124 void Deoptimizer::DoComputeOsrOutputFrame() { |
| 60 UNIMPLEMENTED(); | 125 UNIMPLEMENTED(); |
| 61 } | 126 } |
| 62 | 127 |
| 63 | 128 |
| 64 void Deoptimizer::DoComputeFrame(TranslationIterator* iterator, | 129 void Deoptimizer::DoComputeFrame(TranslationIterator* iterator, |
| 65 int frame_index) { | 130 int frame_index) { |
| 66 UNIMPLEMENTED(); | 131 // Read the ast node id, function, and frame height for this output frame. |
| 132 Translation::Opcode opcode = |
| 133 static_cast<Translation::Opcode>(iterator->Next()); |
| 134 USE(opcode); |
| 135 ASSERT(Translation::FRAME == opcode); |
| 136 int node_id = iterator->Next(); |
| 137 JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next())); |
| 138 unsigned height = iterator->Next(); |
| 139 unsigned height_in_bytes = height * kPointerSize; |
| 140 if (FLAG_trace_deopt) { |
| 141 PrintF(" translating "); |
| 142 function->PrintName(); |
| 143 PrintF(" => node=%d, height=%d\n", node_id, height_in_bytes); |
| 144 } |
| 145 |
| 146 // The 'fixed' part of the frame consists of the incoming parameters and |
| 147 // the part described by JavaScriptFrameConstants. |
| 148 unsigned fixed_frame_size = ComputeFixedSize(function); |
| 149 unsigned input_frame_size = static_cast<unsigned>(input_->GetFrameSize()); |
| 150 unsigned output_frame_size = height_in_bytes + fixed_frame_size; |
| 151 |
| 152 // Allocate and store the output frame description. |
| 153 FrameDescription* output_frame = |
| 154 new(output_frame_size) FrameDescription(output_frame_size, function); |
| 155 |
| 156 bool is_bottommost = (0 == frame_index); |
| 157 bool is_topmost = (output_count_ - 1 == frame_index); |
| 158 ASSERT(frame_index >= 0 && frame_index < output_count_); |
| 159 ASSERT(output_[frame_index] == NULL); |
| 160 output_[frame_index] = output_frame; |
| 161 |
| 162 // The top address for the bottommost output frame can be computed from |
| 163 // the input frame pointer and the output frame's height. For all |
| 164 // subsequent output frames, it can be computed from the previous one's |
| 165 // top address and the current frame's size. |
| 166 intptr_t top_address; |
| 167 if (is_bottommost) { |
| 168 // 2 = context and function in the frame. |
| 169 top_address = |
| 170 input_->GetRegister(rbp.code()) - (2 * kPointerSize) - height_in_bytes; |
| 171 } else { |
| 172 top_address = output_[frame_index - 1]->GetTop() - output_frame_size; |
| 173 } |
| 174 output_frame->SetTop(top_address); |
| 175 |
| 176 // Compute the incoming parameter translation. |
| 177 int parameter_count = function->shared()->formal_parameter_count() + 1; |
| 178 unsigned output_offset = output_frame_size; |
| 179 unsigned input_offset = input_frame_size; |
| 180 for (int i = 0; i < parameter_count; ++i) { |
| 181 output_offset -= kPointerSize; |
| 182 DoTranslateCommand(iterator, frame_index, output_offset); |
| 183 } |
| 184 input_offset -= (parameter_count * kPointerSize); |
| 185 |
| 186 // There are no translation commands for the caller's pc and fp, the |
| 187 // context, and the function. Synthesize their values and set them up |
| 188 // explicitly. |
| 189 // |
| 190 // The caller's pc for the bottommost output frame is the same as in the |
| 191 // input frame. For all subsequent output frames, it can be read from the |
| 192 // previous one. This frame's pc can be computed from the non-optimized |
| 193 // function code and AST id of the bailout. |
| 194 output_offset -= kPointerSize; |
| 195 input_offset -= kPointerSize; |
| 196 intptr_t value; |
| 197 if (is_bottommost) { |
| 198 value = input_->GetFrameSlot(input_offset); |
| 199 } else { |
| 200 value = output_[frame_index - 1]->GetPc(); |
| 201 } |
| 202 output_frame->SetFrameSlot(output_offset, value); |
| 203 if (FLAG_trace_deopt) { |
| 204 PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" |
| 205 V8PRIxPTR " ; caller's pc\n", |
| 206 top_address + output_offset, output_offset, value); |
| 207 } |
| 208 |
| 209 // The caller's frame pointer for the bottommost output frame is the same |
| 210 // as in the input frame. For all subsequent output frames, it can be |
| 211 // read from the previous one. Also compute and set this frame's frame |
| 212 // pointer. |
| 213 output_offset -= kPointerSize; |
| 214 input_offset -= kPointerSize; |
| 215 if (is_bottommost) { |
| 216 value = input_->GetFrameSlot(input_offset); |
| 217 } else { |
| 218 value = output_[frame_index - 1]->GetFp(); |
| 219 } |
| 220 output_frame->SetFrameSlot(output_offset, value); |
| 221 intptr_t fp_value = top_address + output_offset; |
| 222 ASSERT(!is_bottommost || input_->GetRegister(rbp.code()) == fp_value); |
| 223 output_frame->SetFp(fp_value); |
| 224 if (is_topmost) output_frame->SetRegister(rbp.code(), fp_value); |
| 225 if (FLAG_trace_deopt) { |
| 226 PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" |
| 227 V8PRIxPTR " ; caller's fp\n", |
| 228 fp_value, output_offset, value); |
| 229 } |
| 230 |
| 231 // The context can be gotten from the function so long as we don't |
| 232 // optimize functions that need local contexts. |
| 233 output_offset -= kPointerSize; |
| 234 input_offset -= kPointerSize; |
| 235 value = reinterpret_cast<intptr_t>(function->context()); |
| 236 // The context for the bottommost output frame should also agree with the |
| 237 // input frame. |
| 238 ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value); |
| 239 output_frame->SetFrameSlot(output_offset, value); |
| 240 if (is_topmost) output_frame->SetRegister(rsi.code(), value); |
| 241 if (FLAG_trace_deopt) { |
| 242 PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" |
| 243 V8PRIxPTR "; context\n", |
| 244 top_address + output_offset, output_offset, value); |
| 245 } |
| 246 |
| 247 // The function was mentioned explicitly in the BEGIN_FRAME. |
| 248 output_offset -= kPointerSize; |
| 249 input_offset -= kPointerSize; |
| 250 value = reinterpret_cast<intptr_t>(function); |
| 251 // The function for the bottommost output frame should also agree with the |
| 252 // input frame. |
| 253 ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value); |
| 254 output_frame->SetFrameSlot(output_offset, value); |
| 255 if (FLAG_trace_deopt) { |
| 256 PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" |
| 257 V8PRIxPTR "; function\n", |
| 258 top_address + output_offset, output_offset, value); |
| 259 } |
| 260 |
| 261 // Translate the rest of the frame. |
| 262 for (unsigned i = 0; i < height; ++i) { |
| 263 output_offset -= kPointerSize; |
| 264 DoTranslateCommand(iterator, frame_index, output_offset); |
| 265 } |
| 266 ASSERT(0 == output_offset); |
| 267 |
| 268 // Compute this frame's PC, state, and continuation. |
| 269 Code* non_optimized_code = function->shared()->code(); |
| 270 FixedArray* raw_data = non_optimized_code->deoptimization_data(); |
| 271 DeoptimizationOutputData* data = DeoptimizationOutputData::cast(raw_data); |
| 272 Address start = non_optimized_code->instruction_start(); |
| 273 unsigned pc_and_state = GetOutputInfo(data, node_id, function->shared()); |
| 274 unsigned pc_offset = FullCodeGenerator::PcField::decode(pc_and_state); |
| 275 intptr_t pc_value = reinterpret_cast<intptr_t>(start + pc_offset); |
| 276 output_frame->SetPc(pc_value); |
| 277 |
| 278 FullCodeGenerator::State state = |
| 279 FullCodeGenerator::StateField::decode(pc_and_state); |
| 280 output_frame->SetState(Smi::FromInt(state)); |
| 281 |
| 282 // Set the continuation for the topmost frame. |
| 283 if (is_topmost) { |
| 284 Code* continuation = (bailout_type_ == EAGER) |
| 285 ? Isolate::Current()->builtins()->builtin(Builtins::NotifyDeoptimized) |
| 286 : Isolate::Current()->builtins()->builtin( |
| 287 Builtins::NotifyLazyDeoptimized); |
| 288 output_frame->SetContinuation( |
| 289 reinterpret_cast<intptr_t>(continuation->entry())); |
| 290 } |
| 291 |
| 292 if (output_count_ - 1 == frame_index) iterator->Done(); |
| 67 } | 293 } |
| 68 | 294 |
| 69 | 295 |
| 296 #define __ masm()-> |
| 297 |
| 70 void Deoptimizer::EntryGenerator::Generate() { | 298 void Deoptimizer::EntryGenerator::Generate() { |
| 71 UNIMPLEMENTED(); | 299 GeneratePrologue(); |
| 300 CpuFeatures::Scope scope(SSE2); |
| 301 |
| 302 // Save all general purpose registers before messing with them. |
| 303 const int kNumberOfRegisters = Register::kNumRegisters; |
| 304 |
| 305 const int kDoubleRegsSize = kDoubleSize * |
| 306 XMMRegister::kNumAllocatableRegisters; |
| 307 __ subq(rsp, Immediate(kDoubleRegsSize)); |
| 308 |
| 309 for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) { |
| 310 XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i); |
| 311 int offset = i * kDoubleSize; |
| 312 __ movsd(Operand(rsp, offset), xmm_reg); |
| 313 } |
| 314 |
| 315 // We push all registers onto the stack, even though we do not need |
| 316 // to restore all later. |
| 317 for (int i = 0; i < kNumberOfRegisters; i++) { |
| 318 Register r = Register::toRegister(i); |
| 319 __ push(r); |
| 320 } |
| 321 |
| 322 const int kSavedRegistersAreaSize = kNumberOfRegisters * kPointerSize + |
| 323 kDoubleRegsSize; |
| 324 |
| 325 // When calling new_deoptimizer_function we need to pass the last argument |
| 326 // on the stack on windows and in r8 on linux. The remaining arguments are |
| 327 // all passed in registers (different ones on linux and windows though). |
| 328 |
| 329 #ifdef _WIN64 |
| 330 Register arg4 = r9; |
| 331 Register arg3 = r8; |
| 332 Register arg2 = rdx; |
| 333 Register arg1 = rcx; |
| 334 #else |
| 335 Register arg4 = rcx; |
| 336 Register arg3 = rdx; |
| 337 Register arg2 = rsi; |
| 338 Register arg1 = rdi; |
| 339 #endif |
| 340 |
| 341 // We use this to keep the value of the fifth argument temporarily. |
| 342 // Unfortunately we can't store it directly in r8 (used for passing |
| 343 // this on linux), since it is another parameter passing register on windows. |
| 344 Register arg5 = r11; |
| 345 |
| 346 // Get the bailout id from the stack. |
| 347 __ movq(arg3, Operand(rsp, kSavedRegistersAreaSize)); |
| 348 |
| 349 // Get the address of the location in the code object if possible |
| 350 // and compute the fp-to-sp delta in register arg5. |
| 351 if (type() == EAGER) { |
| 352 __ Set(arg4, 0); |
| 353 __ lea(arg5, Operand(rsp, kSavedRegistersAreaSize + 1 * kPointerSize)); |
| 354 } else { |
| 355 __ movq(arg4, Operand(rsp, kSavedRegistersAreaSize + 1 * kPointerSize)); |
| 356 __ lea(arg5, Operand(rsp, kSavedRegistersAreaSize + 2 * kPointerSize)); |
| 357 } |
| 358 |
| 359 __ subq(arg5, rbp); |
| 360 __ neg(arg5); |
| 361 |
| 362 // Allocate a new deoptimizer object. |
| 363 __ PrepareCallCFunction(5); |
| 364 __ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset)); |
| 365 __ movq(arg1, rax); |
| 366 __ movq(arg2, Immediate(type())); |
| 367 // Args 3 and 4 are already in the right registers. |
| 368 |
| 369 // On windows put the argument on the stack (PrepareCallCFunction has |
| 370 // created space for this). On linux pass the argument in r8. |
| 371 #ifdef _WIN64 |
| 372 __ movq(Operand(rsp, 0 * kPointerSize), arg5); |
| 373 #else |
| 374 __ movq(r8, arg5); |
| 375 #endif |
| 376 |
| 377 __ CallCFunction(ExternalReference::new_deoptimizer_function(), 5); |
| 378 // Preserve deoptimizer object in register rax and get the input |
| 379 // frame descriptor pointer. |
| 380 __ movq(rbx, Operand(rax, Deoptimizer::input_offset())); |
| 381 |
| 382 // Fill in the input registers. |
| 383 for (int i = kNumberOfRegisters - 1; i >= 0; i--) { |
| 384 int offset = (i * kPointerSize) + FrameDescription::registers_offset(); |
| 385 __ pop(Operand(rbx, offset)); |
| 386 } |
| 387 |
| 388 // Fill in the double input registers. |
| 389 int double_regs_offset = FrameDescription::double_registers_offset(); |
| 390 for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; i++) { |
| 391 int dst_offset = i * kDoubleSize + double_regs_offset; |
| 392 __ pop(Operand(rbx, dst_offset)); |
| 393 } |
| 394 |
| 395 // Remove the bailout id from the stack. |
| 396 if (type() == EAGER) { |
| 397 __ addq(rsp, Immediate(kPointerSize)); |
| 398 } else { |
| 399 __ addq(rsp, Immediate(2 * kPointerSize)); |
| 400 } |
| 401 |
| 402 // Compute a pointer to the unwinding limit in register rcx; that is |
| 403 // the first stack slot not part of the input frame. |
| 404 __ movq(rcx, Operand(rbx, FrameDescription::frame_size_offset())); |
| 405 __ addq(rcx, rsp); |
| 406 |
| 407 // Unwind the stack down to - but not including - the unwinding |
| 408 // limit and copy the contents of the activation frame to the input |
| 409 // frame description. |
| 410 __ lea(rdx, Operand(rbx, FrameDescription::frame_content_offset())); |
| 411 Label pop_loop; |
| 412 __ bind(&pop_loop); |
| 413 __ pop(Operand(rdx, 0)); |
| 414 __ addq(rdx, Immediate(sizeof(intptr_t))); |
| 415 __ cmpq(rcx, rsp); |
| 416 __ j(not_equal, &pop_loop); |
| 417 |
| 418 // Compute the output frame in the deoptimizer. |
| 419 __ push(rax); |
| 420 __ PrepareCallCFunction(1); |
| 421 __ movq(arg1, rax); |
| 422 __ CallCFunction(ExternalReference::compute_output_frames_function(), 1); |
| 423 __ pop(rax); |
| 424 |
| 425 // Replace the current frame with the output frames. |
| 426 Label outer_push_loop, inner_push_loop; |
| 427 // Outer loop state: rax = current FrameDescription**, rdx = one past the |
| 428 // last FrameDescription**. |
| 429 __ movl(rdx, Operand(rax, Deoptimizer::output_count_offset())); |
| 430 __ movq(rax, Operand(rax, Deoptimizer::output_offset())); |
| 431 __ lea(rdx, Operand(rax, rdx, times_8, 0)); |
| 432 __ bind(&outer_push_loop); |
| 433 // Inner loop state: rbx = current FrameDescription*, rcx = loop index. |
| 434 __ movq(rbx, Operand(rax, 0)); |
| 435 __ movq(rcx, Operand(rbx, FrameDescription::frame_size_offset())); |
| 436 __ bind(&inner_push_loop); |
| 437 __ subq(rcx, Immediate(sizeof(intptr_t))); |
| 438 __ push(Operand(rbx, rcx, times_1, FrameDescription::frame_content_offset())); |
| 439 __ testq(rcx, rcx); |
| 440 __ j(not_zero, &inner_push_loop); |
| 441 __ addq(rax, Immediate(kPointerSize)); |
| 442 __ cmpq(rax, rdx); |
| 443 __ j(below, &outer_push_loop); |
| 444 |
| 445 // In case of OSR, we have to restore the XMM registers. |
| 446 if (type() == OSR) { |
| 447 for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) { |
| 448 XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i); |
| 449 int src_offset = i * kDoubleSize + double_regs_offset; |
| 450 __ movsd(xmm_reg, Operand(rbx, src_offset)); |
| 451 } |
| 452 } |
| 453 |
| 454 // Push state, pc, and continuation from the last output frame. |
| 455 if (type() != OSR) { |
| 456 __ push(Operand(rbx, FrameDescription::state_offset())); |
| 457 } |
| 458 __ push(Operand(rbx, FrameDescription::pc_offset())); |
| 459 __ push(Operand(rbx, FrameDescription::continuation_offset())); |
| 460 |
| 461 // Push the registers from the last output frame. |
| 462 for (int i = 0; i < kNumberOfRegisters; i++) { |
| 463 int offset = (i * kPointerSize) + FrameDescription::registers_offset(); |
| 464 __ push(Operand(rbx, offset)); |
| 465 } |
| 466 |
| 467 // Restore the registers from the stack. |
| 468 for (int i = kNumberOfRegisters - 1; i >= 0; i--) { |
| 469 Register r = Register::toRegister(i); |
| 470 // Do not restore rsp, simply pop the value into the next register |
| 471 // and overwrite this afterwards. |
| 472 if (r.is(rsp)) { |
| 473 ASSERT(i > 0); |
| 474 r = Register::toRegister(i - 1); |
| 475 } |
| 476 __ pop(r); |
| 477 } |
| 478 |
| 479 // Set up the roots register. |
| 480 ExternalReference roots_address = ExternalReference::roots_address(); |
| 481 __ movq(r13, roots_address); |
| 482 |
| 483 __ movq(kSmiConstantRegister, |
| 484 reinterpret_cast<uint64_t>(Smi::FromInt(kSmiConstantRegisterValue)), |
| 485 RelocInfo::NONE); |
| 486 |
| 487 // Return to the continuation point. |
| 488 __ ret(0); |
| 72 } | 489 } |
| 73 | 490 |
| 74 | 491 |
| 75 void Deoptimizer::TableEntryGenerator::GeneratePrologue() { | 492 void Deoptimizer::TableEntryGenerator::GeneratePrologue() { |
| 76 UNIMPLEMENTED(); | 493 // Create a sequence of deoptimization entries. |
| 494 Label done; |
| 495 for (int i = 0; i < count(); i++) { |
| 496 int start = masm()->pc_offset(); |
| 497 USE(start); |
| 498 __ push_imm32(i); |
| 499 __ jmp(&done); |
| 500 ASSERT(masm()->pc_offset() - start == table_entry_size_); |
| 501 } |
| 502 __ bind(&done); |
| 77 } | 503 } |
| 78 | 504 |
| 505 #undef __ |
| 506 |
| 507 |
| 79 } } // namespace v8::internal | 508 } } // namespace v8::internal |
| 80 | 509 |
| 81 #endif // V8_TARGET_ARCH_X64 | 510 #endif // V8_TARGET_ARCH_X64 |
| OLD | NEW |