| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 66 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 77 #endif | 77 #endif |
| 78 DeoptimizationInputData* deopt_data = | 78 DeoptimizationInputData* deopt_data = |
| 79 DeoptimizationInputData::cast(code->deoptimization_data()); | 79 DeoptimizationInputData::cast(code->deoptimization_data()); |
| 80 for (int i = 0; i < deopt_data->DeoptCount(); i++) { | 80 for (int i = 0; i < deopt_data->DeoptCount(); i++) { |
| 81 if (deopt_data->Pc(i)->value() == -1) continue; | 81 if (deopt_data->Pc(i)->value() == -1) continue; |
| 82 // Position where Call will be patched in. | 82 // Position where Call will be patched in. |
| 83 Address call_address = instruction_start + deopt_data->Pc(i)->value(); | 83 Address call_address = instruction_start + deopt_data->Pc(i)->value(); |
| 84 // There is room enough to write a long call instruction because we pad | 84 // There is room enough to write a long call instruction because we pad |
| 85 // LLazyBailout instructions with nops if necessary. | 85 // LLazyBailout instructions with nops if necessary. |
| 86 CodePatcher patcher(call_address, Assembler::kCallInstructionLength); | 86 CodePatcher patcher(call_address, Assembler::kCallInstructionLength); |
| 87 #ifndef V8_TARGET_ARCH_X32 |
| 87 patcher.masm()->Call(GetDeoptimizationEntry(isolate, i, LAZY), | 88 patcher.masm()->Call(GetDeoptimizationEntry(isolate, i, LAZY), |
| 88 RelocInfo::NONE64); | 89 RelocInfo::NONE64); |
| 90 #else |
| 91 patcher.masm()->Call(GetDeoptimizationEntry(isolate, i, LAZY), |
| 92 RelocInfo::NONE32); |
| 93 #endif |
| 89 ASSERT(prev_call_address == NULL || | 94 ASSERT(prev_call_address == NULL || |
| 90 call_address >= prev_call_address + patch_size()); | 95 call_address >= prev_call_address + patch_size()); |
| 91 ASSERT(call_address + patch_size() <= code->instruction_end()); | 96 ASSERT(call_address + patch_size() <= code->instruction_end()); |
| 92 #ifdef DEBUG | 97 #ifdef DEBUG |
| 93 prev_call_address = call_address; | 98 prev_call_address = call_address; |
| 94 #endif | 99 #endif |
| 95 } | 100 } |
| 96 | 101 |
| 97 // Add the deoptimizing code to the list. | 102 // Add the deoptimizing code to the list. |
| 98 DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code); | 103 DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code); |
| (...skipping 10 matching lines...) Expand all Loading... |
| 109 | 114 |
| 110 if (FLAG_trace_deopt) { | 115 if (FLAG_trace_deopt) { |
| 111 PrintF("[forced deoptimization: "); | 116 PrintF("[forced deoptimization: "); |
| 112 function->PrintName(); | 117 function->PrintName(); |
| 113 PrintF(" / %" V8PRIxPTR "]\n", reinterpret_cast<intptr_t>(function)); | 118 PrintF(" / %" V8PRIxPTR "]\n", reinterpret_cast<intptr_t>(function)); |
| 114 } | 119 } |
| 115 } | 120 } |
| 116 | 121 |
| 117 | 122 |
| 118 static const byte kJnsInstruction = 0x79; | 123 static const byte kJnsInstruction = 0x79; |
| 124 #ifndef V8_TARGET_ARCH_X32 |
| 119 static const byte kJnsOffset = 0x1d; | 125 static const byte kJnsOffset = 0x1d; |
| 126 #else |
| 127 static const byte kJnsOffset = 0x14; |
| 128 #endif |
| 120 static const byte kCallInstruction = 0xe8; | 129 static const byte kCallInstruction = 0xe8; |
| 121 static const byte kNopByteOne = 0x66; | 130 static const byte kNopByteOne = 0x66; |
| 122 static const byte kNopByteTwo = 0x90; | 131 static const byte kNopByteTwo = 0x90; |
| 123 | 132 |
| 124 // The back edge bookkeeping code matches the pattern: | 133 // The back edge bookkeeping code matches the pattern: |
| 125 // | 134 // |
| 126 // add <profiling_counter>, <-delta> | 135 // add <profiling_counter>, <-delta> |
| 127 // jns ok | 136 // jns ok |
| 128 // call <stack guard> | 137 // call <stack guard> |
| 129 // ok: | 138 // ok: |
| (...skipping 167 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 297 int limit = input_offset - (parameter_count * kPointerSize); | 306 int limit = input_offset - (parameter_count * kPointerSize); |
| 298 while (ok && input_offset > limit) { | 307 while (ok && input_offset > limit) { |
| 299 ok = DoOsrTranslateCommand(&iterator, &input_offset); | 308 ok = DoOsrTranslateCommand(&iterator, &input_offset); |
| 300 } | 309 } |
| 301 | 310 |
| 302 // There are no translation commands for the caller's pc and fp, the | 311 // There are no translation commands for the caller's pc and fp, the |
| 303 // context, and the function. Set them up explicitly. | 312 // context, and the function. Set them up explicitly. |
| 304 for (int i = StandardFrameConstants::kCallerPCOffset; | 313 for (int i = StandardFrameConstants::kCallerPCOffset; |
| 305 ok && i >= StandardFrameConstants::kMarkerOffset; | 314 ok && i >= StandardFrameConstants::kMarkerOffset; |
| 306 i -= kPointerSize) { | 315 i -= kPointerSize) { |
| 316 #ifdef V8_TARGET_ARCH_X32 |
| 317 if (i == StandardFrameConstants::kCallerPCOffset || |
| 318 i == StandardFrameConstants::kCallerFPOffset) { |
| 319 // Set the high 32 bits of PC and FP to 0. |
| 320 output_[0]->SetFrameSlot(output_offset, 0); |
| 321 input_offset -= kPointerSize; |
| 322 output_offset -= kPointerSize; |
| 323 } |
| 324 #endif |
| 307 intptr_t input_value = input_->GetFrameSlot(input_offset); | 325 intptr_t input_value = input_->GetFrameSlot(input_offset); |
| 308 if (FLAG_trace_osr) { | 326 if (FLAG_trace_osr) { |
| 309 const char* name = "UNKNOWN"; | 327 const char* name = "UNKNOWN"; |
| 310 switch (i) { | 328 switch (i) { |
| 311 case StandardFrameConstants::kCallerPCOffset: | 329 case StandardFrameConstants::kCallerPCOffset: |
| 312 name = "caller's pc"; | 330 name = "caller's pc"; |
| 313 break; | 331 break; |
| 314 case StandardFrameConstants::kCallerFPOffset: | 332 case StandardFrameConstants::kCallerFPOffset: |
| 315 name = "fp"; | 333 name = "fp"; |
| 316 break; | 334 break; |
| 317 case StandardFrameConstants::kContextOffset: | 335 case StandardFrameConstants::kContextOffset: |
| 318 name = "context"; | 336 name = "context"; |
| 319 break; | 337 break; |
| 320 case StandardFrameConstants::kMarkerOffset: | 338 case StandardFrameConstants::kMarkerOffset: |
| 321 name = "function"; | 339 name = "function"; |
| 322 break; | 340 break; |
| 323 } | 341 } |
| 324 PrintF(" [rsp + %d] <- 0x%08" V8PRIxPTR " ; [rsp + %d] " | 342 PrintF(" [rsp + %d] <- 0x%08" V8PRIxPTR " ; [rsp + %d] " |
| 325 "(fixed part - %s)\n", | 343 "(fixed part - %s)\n", |
| 326 output_offset, | 344 output_offset, |
| 327 input_value, | 345 input_value, |
| 328 input_offset, | 346 input_offset, |
| 329 name); | 347 name); |
| 330 } | 348 } |
| 331 output_[0]->SetFrameSlot(output_offset, input_->GetFrameSlot(input_offset)); | 349 output_[0]->SetFrameSlot(output_offset, input_->GetFrameSlot(input_offset)); |
| 332 input_offset -= kPointerSize; | 350 input_offset -= kPointerSize; |
| 333 output_offset -= kPointerSize; | 351 output_offset -= kPointerSize; |
| 352 #ifdef V8_TARGET_ARCH_X32 |
| 353 if (i == StandardFrameConstants::kCallerPCOffset) { |
| 354 i -= kHWRegSize - kPointerSize; |
| 355 } |
| 356 #endif |
| 334 } | 357 } |
| 335 | 358 |
| 336 // Translate the rest of the frame. | 359 // Translate the rest of the frame. |
| 337 while (ok && input_offset >= 0) { | 360 while (ok && input_offset >= 0) { |
| 338 ok = DoOsrTranslateCommand(&iterator, &input_offset); | 361 ok = DoOsrTranslateCommand(&iterator, &input_offset); |
| 339 } | 362 } |
| 340 | 363 |
| 341 // If translation of any command failed, continue using the input frame. | 364 // If translation of any command failed, continue using the input frame. |
| 342 if (!ok) { | 365 if (!ok) { |
| 343 delete output_[0]; | 366 delete output_[0]; |
| (...skipping 32 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 376 input_->SetRegister(i, i * 4); | 399 input_->SetRegister(i, i * 4); |
| 377 } | 400 } |
| 378 input_->SetRegister(rsp.code(), reinterpret_cast<intptr_t>(frame->sp())); | 401 input_->SetRegister(rsp.code(), reinterpret_cast<intptr_t>(frame->sp())); |
| 379 input_->SetRegister(rbp.code(), reinterpret_cast<intptr_t>(frame->fp())); | 402 input_->SetRegister(rbp.code(), reinterpret_cast<intptr_t>(frame->fp())); |
| 380 for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); i++) { | 403 for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); i++) { |
| 381 input_->SetDoubleRegister(i, 0.0); | 404 input_->SetDoubleRegister(i, 0.0); |
| 382 } | 405 } |
| 383 | 406 |
| 384 // Fill the frame content from the actual data on the frame. | 407 // Fill the frame content from the actual data on the frame. |
| 385 for (unsigned i = 0; i < input_->GetFrameSize(); i += kPointerSize) { | 408 for (unsigned i = 0; i < input_->GetFrameSize(); i += kPointerSize) { |
| 409 #ifndef V8_TARGET_ARCH_X32 |
| 386 input_->SetFrameSlot(i, Memory::uint64_at(tos + i)); | 410 input_->SetFrameSlot(i, Memory::uint64_at(tos + i)); |
| 411 #else |
| 412 input_->SetFrameSlot(i, Memory::uint32_at(tos + i)); |
| 413 #endif |
| 387 } | 414 } |
| 388 } | 415 } |
| 389 | 416 |
| 390 | 417 |
| 391 void Deoptimizer::SetPlatformCompiledStubRegisters( | 418 void Deoptimizer::SetPlatformCompiledStubRegisters( |
| 392 FrameDescription* output_frame, CodeStubInterfaceDescriptor* descriptor) { | 419 FrameDescription* output_frame, CodeStubInterfaceDescriptor* descriptor) { |
| 393 intptr_t handler = | 420 intptr_t handler = |
| 394 reinterpret_cast<intptr_t>(descriptor->deoptimization_handler_); | 421 reinterpret_cast<intptr_t>(descriptor->deoptimization_handler_); |
| 395 int params = descriptor->register_param_count_; | 422 int params = descriptor->register_param_count_; |
| 396 if (descriptor->stack_parameter_count_ != NULL) { | 423 if (descriptor->stack_parameter_count_ != NULL) { |
| (...skipping 12 matching lines...) Expand all Loading... |
| 409 } | 436 } |
| 410 | 437 |
| 411 | 438 |
| 412 bool Deoptimizer::HasAlignmentPadding(JSFunction* function) { | 439 bool Deoptimizer::HasAlignmentPadding(JSFunction* function) { |
| 413 // There is no dynamic alignment padding on x64 in the input frame. | 440 // There is no dynamic alignment padding on x64 in the input frame. |
| 414 return false; | 441 return false; |
| 415 } | 442 } |
| 416 | 443 |
| 417 | 444 |
| 418 #define __ masm()-> | 445 #define __ masm()-> |
| 446 #define __k __ |
| 447 #define __q __ |
| 419 | 448 |
| 420 void Deoptimizer::EntryGenerator::Generate() { | 449 void Deoptimizer::EntryGenerator::Generate() { |
| 421 GeneratePrologue(); | 450 GeneratePrologue(); |
| 422 | 451 |
| 423 // Save all general purpose registers before messing with them. | 452 // Save all general purpose registers before messing with them. |
| 424 const int kNumberOfRegisters = Register::kNumRegisters; | 453 const int kNumberOfRegisters = Register::kNumRegisters; |
| 425 | 454 |
| 426 const int kDoubleRegsSize = kDoubleSize * | 455 const int kDoubleRegsSize = kDoubleSize * |
| 427 XMMRegister::NumAllocatableRegisters(); | 456 XMMRegister::NumAllocatableRegisters(); |
| 428 __ subq(rsp, Immediate(kDoubleRegsSize)); | 457 __ subq(rsp, Immediate(kDoubleRegsSize)); |
| 429 | 458 |
| 430 for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); ++i) { | 459 for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); ++i) { |
| 431 XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i); | 460 XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i); |
| 432 int offset = i * kDoubleSize; | 461 int offset = i * kDoubleSize; |
| 433 __ movsd(Operand(rsp, offset), xmm_reg); | 462 __ movsd(Operand(rsp, offset), xmm_reg); |
| 434 } | 463 } |
| 435 | 464 |
| 436 // We push all registers onto the stack, even though we do not need | 465 // We push all registers onto the stack, even though we do not need |
| 437 // to restore all later. | 466 // to restore all later. |
| 438 for (int i = 0; i < kNumberOfRegisters; i++) { | 467 for (int i = 0; i < kNumberOfRegisters; i++) { |
| 439 Register r = Register::from_code(i); | 468 Register r = Register::from_code(i); |
| 440 __ push(r); | 469 __k push(r); |
| 441 } | 470 } |
| 442 | 471 |
| 472 #ifndef V8_TARGET_ARCH_X32 |
| 443 const int kSavedRegistersAreaSize = kNumberOfRegisters * kPointerSize + | 473 const int kSavedRegistersAreaSize = kNumberOfRegisters * kPointerSize + |
| 444 kDoubleRegsSize; | 474 kDoubleRegsSize; |
| 475 #else |
| 476 const int kSavedRegistersAreaSize = kNumberOfRegisters * kHWRegSize + |
| 477 kDoubleRegsSize; |
| 478 #endif |
| 445 | 479 |
| 446 // We use this to keep the value of the fifth argument temporarily. | 480 // We use this to keep the value of the fifth argument temporarily. |
| 447 // Unfortunately we can't store it directly in r8 (used for passing | 481 // Unfortunately we can't store it directly in r8 (used for passing |
| 448 // this on linux), since it is another parameter passing register on windows. | 482 // this on linux), since it is another parameter passing register on windows. |
| 449 Register arg5 = r11; | 483 Register arg5 = r11; |
| 450 | 484 |
| 451 // Get the bailout id from the stack. | 485 // Get the bailout id from the stack. |
| 452 __ movq(arg_reg_3, Operand(rsp, kSavedRegistersAreaSize)); | 486 __ movq(arg_reg_3, Operand(rsp, kSavedRegistersAreaSize)); |
| 453 | 487 |
| 454 // Get the address of the location in the code object if possible | 488 // Get the address of the location in the code object if possible |
| 455 // and compute the fp-to-sp delta in register arg5. | 489 // and compute the fp-to-sp delta in register arg5. |
| 456 if (type() == EAGER || type() == SOFT) { | 490 if (type() == EAGER || type() == SOFT) { |
| 457 __ Set(arg_reg_4, 0); | 491 __ Set(arg_reg_4, 0); |
| 458 __ lea(arg5, Operand(rsp, kSavedRegistersAreaSize + 1 * kPointerSize)); | 492 __q lea(arg5, Operand(rsp, kSavedRegistersAreaSize + 1 * kPointerSize)); |
| 459 } else { | 493 } else { |
| 494 #ifndef V8_TARGET_ARCH_X32 |
| 460 __ movq(arg_reg_4, | 495 __ movq(arg_reg_4, |
| 461 Operand(rsp, kSavedRegistersAreaSize + 1 * kPointerSize)); | 496 Operand(rsp, kSavedRegistersAreaSize + 1 * kPointerSize)); |
| 462 __ lea(arg5, Operand(rsp, kSavedRegistersAreaSize + 2 * kPointerSize)); | 497 #else |
| 498 __ movl(arg_reg_4, Operand(rsp, kSavedRegistersAreaSize + 1 * kHWRegSize)); |
| 499 #endif |
| 500 __q lea(arg5, Operand(rsp, kSavedRegistersAreaSize + 2 * kPointerSize)); |
| 463 } | 501 } |
| 464 | 502 |
| 465 __ subq(arg5, rbp); | 503 __ subq(arg5, rbp); |
| 466 __ neg(arg5); | 504 __ neg(arg5); |
| 467 | 505 |
| 468 // Allocate a new deoptimizer object. | 506 // Allocate a new deoptimizer object. |
| 469 __ PrepareCallCFunction(6); | 507 __ PrepareCallCFunction(6); |
| 470 __ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset)); | 508 __ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset)); |
| 471 __ movq(arg_reg_1, rax); | 509 __ movq(arg_reg_1, rax); |
| 472 __ Set(arg_reg_2, type()); | 510 __ Set(arg_reg_2, type()); |
| (...skipping 13 matching lines...) Expand all Loading... |
| 486 { AllowExternalCallThatCantCauseGC scope(masm()); | 524 { AllowExternalCallThatCantCauseGC scope(masm()); |
| 487 __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate()), 6); | 525 __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate()), 6); |
| 488 } | 526 } |
| 489 // Preserve deoptimizer object in register rax and get the input | 527 // Preserve deoptimizer object in register rax and get the input |
| 490 // frame descriptor pointer. | 528 // frame descriptor pointer. |
| 491 __ movq(rbx, Operand(rax, Deoptimizer::input_offset())); | 529 __ movq(rbx, Operand(rax, Deoptimizer::input_offset())); |
| 492 | 530 |
| 493 // Fill in the input registers. | 531 // Fill in the input registers. |
| 494 for (int i = kNumberOfRegisters -1; i >= 0; i--) { | 532 for (int i = kNumberOfRegisters -1; i >= 0; i--) { |
| 495 int offset = (i * kPointerSize) + FrameDescription::registers_offset(); | 533 int offset = (i * kPointerSize) + FrameDescription::registers_offset(); |
| 534 #ifndef V8_TARGET_ARCH_X32 |
| 496 __ pop(Operand(rbx, offset)); | 535 __ pop(Operand(rbx, offset)); |
| 536 #else |
| 537 __ pop(kScratchRegister); |
| 538 __ movl(Operand(rbx, offset), kScratchRegister); |
| 539 #endif |
| 497 } | 540 } |
| 498 | 541 |
| 499 // Fill in the double input registers. | 542 // Fill in the double input registers. |
| 500 int double_regs_offset = FrameDescription::double_registers_offset(); | 543 int double_regs_offset = FrameDescription::double_registers_offset(); |
| 501 for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); i++) { | 544 for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); i++) { |
| 502 int dst_offset = i * kDoubleSize + double_regs_offset; | 545 int dst_offset = i * kDoubleSize + double_regs_offset; |
| 503 __ pop(Operand(rbx, dst_offset)); | 546 __k pop(Operand(rbx, dst_offset)); |
| 504 } | 547 } |
| 505 | 548 |
| 506 // Remove the bailout id from the stack. | 549 // Remove the bailout id from the stack. |
| 507 if (type() == EAGER || type() == SOFT) { | 550 if (type() == EAGER || type() == SOFT) { |
| 508 __ addq(rsp, Immediate(kPointerSize)); | 551 __q addq(rsp, Immediate(kPointerSize)); |
| 509 } else { | 552 } else { |
| 510 __ addq(rsp, Immediate(2 * kPointerSize)); | 553 __q addq(rsp, Immediate(2 * kPointerSize)); |
| 511 } | 554 } |
| 512 | 555 |
| 513 // Compute a pointer to the unwinding limit in register rcx; that is | 556 // Compute a pointer to the unwinding limit in register rcx; that is |
| 514 // the first stack slot not part of the input frame. | 557 // the first stack slot not part of the input frame. |
| 515 __ movq(rcx, Operand(rbx, FrameDescription::frame_size_offset())); | 558 __ movq(rcx, Operand(rbx, FrameDescription::frame_size_offset())); |
| 516 __ addq(rcx, rsp); | 559 __ addq(rcx, rsp); |
| 517 | 560 |
| 518 // Unwind the stack down to - but not including - the unwinding | 561 // Unwind the stack down to - but not including - the unwinding |
| 519 // limit and copy the contents of the activation frame to the input | 562 // limit and copy the contents of the activation frame to the input |
| 520 // frame description. | 563 // frame description. |
| 521 __ lea(rdx, Operand(rbx, FrameDescription::frame_content_offset())); | 564 __ lea(rdx, Operand(rbx, FrameDescription::frame_content_offset())); |
| 522 Label pop_loop_header; | 565 Label pop_loop_header; |
| 523 __ jmp(&pop_loop_header); | 566 __ jmp(&pop_loop_header); |
| 524 Label pop_loop; | 567 Label pop_loop; |
| 525 __ bind(&pop_loop); | 568 __ bind(&pop_loop); |
| 526 __ pop(Operand(rdx, 0)); | 569 __ pop(Operand(rdx, 0)); |
| 527 __ addq(rdx, Immediate(sizeof(intptr_t))); | 570 __ addq(rdx, Immediate(sizeof(intptr_t))); |
| 528 __ bind(&pop_loop_header); | 571 __ bind(&pop_loop_header); |
| 529 __ cmpq(rcx, rsp); | 572 __ cmpq(rcx, rsp); |
| 530 __ j(not_equal, &pop_loop); | 573 __ j(not_equal, &pop_loop); |
| 531 | 574 |
| 532 // Compute the output frame in the deoptimizer. | 575 // Compute the output frame in the deoptimizer. |
| 533 __ push(rax); | 576 __k push(rax); |
| 534 __ PrepareCallCFunction(2); | 577 __ PrepareCallCFunction(2); |
| 535 __ movq(arg_reg_1, rax); | 578 __ movq(arg_reg_1, rax); |
| 536 __ LoadAddress(arg_reg_2, ExternalReference::isolate_address(isolate())); | 579 __ LoadAddress(arg_reg_2, ExternalReference::isolate_address(isolate())); |
| 537 { | 580 { |
| 538 AllowExternalCallThatCantCauseGC scope(masm()); | 581 AllowExternalCallThatCantCauseGC scope(masm()); |
| 539 __ CallCFunction( | 582 __ CallCFunction( |
| 540 ExternalReference::compute_output_frames_function(isolate()), 2); | 583 ExternalReference::compute_output_frames_function(isolate()), 2); |
| 541 } | 584 } |
| 542 __ pop(rax); | 585 __k pop(rax); |
| 543 | 586 |
| 544 // Replace the current frame with the output frames. | 587 // Replace the current frame with the output frames. |
| 545 Label outer_push_loop, inner_push_loop, | 588 Label outer_push_loop, inner_push_loop, |
| 546 outer_loop_header, inner_loop_header; | 589 outer_loop_header, inner_loop_header; |
| 547 // Outer loop state: rax = current FrameDescription**, rdx = one past the | 590 // Outer loop state: rax = current FrameDescription**, rdx = one past the |
| 548 // last FrameDescription**. | 591 // last FrameDescription**. |
| 549 __ movl(rdx, Operand(rax, Deoptimizer::output_count_offset())); | 592 __ movl(rdx, Operand(rax, Deoptimizer::output_count_offset())); |
| 550 __ movq(rax, Operand(rax, Deoptimizer::output_offset())); | 593 __ movq(rax, Operand(rax, Deoptimizer::output_offset())); |
| 551 __ lea(rdx, Operand(rax, rdx, times_pointer_size, 0)); | 594 __ lea(rdx, Operand(rax, rdx, times_pointer_size, 0)); |
| 552 __ jmp(&outer_loop_header); | 595 __ jmp(&outer_loop_header); |
| (...skipping 14 matching lines...) Expand all Loading... |
| 567 __ j(below, &outer_push_loop); | 610 __ j(below, &outer_push_loop); |
| 568 | 611 |
| 569 for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); ++i) { | 612 for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); ++i) { |
| 570 XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i); | 613 XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i); |
| 571 int src_offset = i * kDoubleSize + double_regs_offset; | 614 int src_offset = i * kDoubleSize + double_regs_offset; |
| 572 __ movsd(xmm_reg, Operand(rbx, src_offset)); | 615 __ movsd(xmm_reg, Operand(rbx, src_offset)); |
| 573 } | 616 } |
| 574 | 617 |
| 575 // Push state, pc, and continuation from the last output frame. | 618 // Push state, pc, and continuation from the last output frame. |
| 576 if (type() != OSR) { | 619 if (type() != OSR) { |
| 620 #ifdef V8_TARGET_ARCH_X32 |
| 621 __ Push(Immediate(0)); |
| 622 #endif |
| 577 __ push(Operand(rbx, FrameDescription::state_offset())); | 623 __ push(Operand(rbx, FrameDescription::state_offset())); |
| 578 } | 624 } |
| 625 #ifdef V8_TARGET_ARCH_X32 |
| 626 __ Push(Immediate(0)); |
| 627 #endif |
| 579 __ push(Operand(rbx, FrameDescription::pc_offset())); | 628 __ push(Operand(rbx, FrameDescription::pc_offset())); |
| 629 #ifdef V8_TARGET_ARCH_X32 |
| 630 __ Push(Immediate(0)); |
| 631 #endif |
| 580 __ push(Operand(rbx, FrameDescription::continuation_offset())); | 632 __ push(Operand(rbx, FrameDescription::continuation_offset())); |
| 581 | 633 |
| 582 // Push the registers from the last output frame. | 634 // Push the registers from the last output frame. |
| 583 for (int i = 0; i < kNumberOfRegisters; i++) { | 635 for (int i = 0; i < kNumberOfRegisters; i++) { |
| 584 int offset = (i * kPointerSize) + FrameDescription::registers_offset(); | 636 int offset = (i * kPointerSize) + FrameDescription::registers_offset(); |
| 637 #ifndef V8_TARGET_ARCH_X32 |
| 585 __ push(Operand(rbx, offset)); | 638 __ push(Operand(rbx, offset)); |
| 639 #else |
| 640 __ movl(kScratchRegister, Operand(rbx, offset)); |
| 641 __ push(kScratchRegister); |
| 642 #endif |
| 586 } | 643 } |
| 587 | 644 |
| 588 // Restore the registers from the stack. | 645 // Restore the registers from the stack. |
| 589 for (int i = kNumberOfRegisters - 1; i >= 0 ; i--) { | 646 for (int i = kNumberOfRegisters - 1; i >= 0 ; i--) { |
| 590 Register r = Register::from_code(i); | 647 Register r = Register::from_code(i); |
| 591 // Do not restore rsp, simply pop the value into the next register | 648 // Do not restore rsp, simply pop the value into the next register |
| 592 // and overwrite this afterwards. | 649 // and overwrite this afterwards. |
| 593 if (r.is(rsp)) { | 650 if (r.is(rsp)) { |
| 594 ASSERT(i > 0); | 651 ASSERT(i > 0); |
| 595 r = Register::from_code(i - 1); | 652 r = Register::from_code(i - 1); |
| 596 } | 653 } |
| 597 __ pop(r); | 654 __k pop(r); |
| 598 } | 655 } |
| 599 | 656 |
| 600 // Set up the roots register. | 657 // Set up the roots register. |
| 601 __ InitializeRootRegister(); | 658 __ InitializeRootRegister(); |
| 602 __ InitializeSmiConstantRegister(); | 659 __ InitializeSmiConstantRegister(); |
| 603 | 660 |
| 604 // Return to the continuation point. | 661 // Return to the continuation point. |
| 605 __ ret(0); | 662 __ ret(0); |
| 606 } | 663 } |
| 607 | 664 |
| 608 | 665 |
| 609 void Deoptimizer::TableEntryGenerator::GeneratePrologue() { | 666 void Deoptimizer::TableEntryGenerator::GeneratePrologue() { |
| 610 // Create a sequence of deoptimization entries. | 667 // Create a sequence of deoptimization entries. |
| 611 Label done; | 668 Label done; |
| 612 for (int i = 0; i < count(); i++) { | 669 for (int i = 0; i < count(); i++) { |
| 613 int start = masm()->pc_offset(); | 670 int start = masm()->pc_offset(); |
| 614 USE(start); | 671 USE(start); |
| 615 __ push_imm32(i); | 672 __k push_imm32(i); |
| 616 __ jmp(&done); | 673 __ jmp(&done); |
| 617 ASSERT(masm()->pc_offset() - start == table_entry_size_); | 674 ASSERT(masm()->pc_offset() - start == table_entry_size_); |
| 618 } | 675 } |
| 619 __ bind(&done); | 676 __ bind(&done); |
| 620 } | 677 } |
| 621 | 678 |
| 679 #undef __k |
| 680 #undef __q |
| 622 #undef __ | 681 #undef __ |
| 623 | 682 |
| 624 | 683 |
| 625 } } // namespace v8::internal | 684 } } // namespace v8::internal |
| 626 | 685 |
| 627 #endif // V8_TARGET_ARCH_X64 | 686 #endif // V8_TARGET_ARCH_X64 |
| OLD | NEW |