| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "v8.h" | 5 #include "v8.h" |
| 6 | 6 |
| 7 #if V8_TARGET_ARCH_IA32 | 7 #if V8_TARGET_ARCH_X87 |
| 8 | 8 |
| 9 #include "bootstrapper.h" | 9 #include "bootstrapper.h" |
| 10 #include "code-stubs.h" | 10 #include "code-stubs.h" |
| 11 #include "isolate.h" | 11 #include "isolate.h" |
| 12 #include "jsregexp.h" | 12 #include "jsregexp.h" |
| 13 #include "regexp-macro-assembler.h" | 13 #include "regexp-macro-assembler.h" |
| 14 #include "runtime.h" | 14 #include "runtime.h" |
| 15 #include "stub-cache.h" | 15 #include "stub-cache.h" |
| 16 #include "codegen.h" | 16 #include "codegen.h" |
| 17 #include "runtime.h" | 17 #include "runtime.h" |
| (...skipping 39 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 57 Runtime::FunctionForId(Runtime::kHiddenNumberToString)->entry; | 57 Runtime::FunctionForId(Runtime::kHiddenNumberToString)->entry; |
| 58 } | 58 } |
| 59 | 59 |
| 60 | 60 |
| 61 void FastCloneShallowArrayStub::InitializeInterfaceDescriptor( | 61 void FastCloneShallowArrayStub::InitializeInterfaceDescriptor( |
| 62 CodeStubInterfaceDescriptor* descriptor) { | 62 CodeStubInterfaceDescriptor* descriptor) { |
| 63 static Register registers[] = { eax, ebx, ecx }; | 63 static Register registers[] = { eax, ebx, ecx }; |
| 64 descriptor->register_param_count_ = 3; | 64 descriptor->register_param_count_ = 3; |
| 65 descriptor->register_params_ = registers; | 65 descriptor->register_params_ = registers; |
| 66 static Representation representations[] = { | 66 static Representation representations[] = { |
| 67 Representation::Tagged(), | 67 Representation::Tagged(), |
| 68 Representation::Smi(), | 68 Representation::Smi(), |
| 69 Representation::Tagged() }; | 69 Representation::Tagged() }; |
| 70 descriptor->register_param_representations_ = representations; | 70 descriptor->register_param_representations_ = representations; |
| 71 descriptor->deoptimization_handler_ = | 71 descriptor->deoptimization_handler_ = |
| 72 Runtime::FunctionForId( | 72 Runtime::FunctionForId( |
| 73 Runtime::kHiddenCreateArrayLiteralStubBailout)->entry; | 73 Runtime::kHiddenCreateArrayLiteralStubBailout)->entry; |
| 74 } | 74 } |
| 75 | 75 |
| 76 | 76 |
| 77 void FastCloneShallowObjectStub::InitializeInterfaceDescriptor( | 77 void FastCloneShallowObjectStub::InitializeInterfaceDescriptor( |
| 78 CodeStubInterfaceDescriptor* descriptor) { | 78 CodeStubInterfaceDescriptor* descriptor) { |
| 79 static Register registers[] = { eax, ebx, ecx, edx }; | 79 static Register registers[] = { eax, ebx, ecx, edx }; |
| (...skipping 392 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 472 | 472 |
| 473 __ ret(0); | 473 __ ret(0); |
| 474 } | 474 } |
| 475 | 475 |
| 476 | 476 |
| 477 void StoreBufferOverflowStub::Generate(MacroAssembler* masm) { | 477 void StoreBufferOverflowStub::Generate(MacroAssembler* masm) { |
| 478 // We don't allow a GC during a store buffer overflow so there is no need to | 478 // We don't allow a GC during a store buffer overflow so there is no need to |
| 479 // store the registers in any particular way, but we do have to store and | 479 // store the registers in any particular way, but we do have to store and |
| 480 // restore them. | 480 // restore them. |
| 481 __ pushad(); | 481 __ pushad(); |
| 482 if (save_doubles_ == kSaveFPRegs) { | |
| 483 __ sub(esp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters)); | |
| 484 for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) { | |
| 485 XMMRegister reg = XMMRegister::from_code(i); | |
| 486 __ movsd(Operand(esp, i * kDoubleSize), reg); | |
| 487 } | |
| 488 } | |
| 489 const int argument_count = 1; | 482 const int argument_count = 1; |
| 490 | 483 |
| 491 AllowExternalCallThatCantCauseGC scope(masm); | 484 AllowExternalCallThatCantCauseGC scope(masm); |
| 492 __ PrepareCallCFunction(argument_count, ecx); | 485 __ PrepareCallCFunction(argument_count, ecx); |
| 493 __ mov(Operand(esp, 0 * kPointerSize), | 486 __ mov(Operand(esp, 0 * kPointerSize), |
| 494 Immediate(ExternalReference::isolate_address(isolate()))); | 487 Immediate(ExternalReference::isolate_address(isolate()))); |
| 495 __ CallCFunction( | 488 __ CallCFunction( |
| 496 ExternalReference::store_buffer_overflow_function(isolate()), | 489 ExternalReference::store_buffer_overflow_function(isolate()), |
| 497 argument_count); | 490 argument_count); |
| 498 if (save_doubles_ == kSaveFPRegs) { | |
| 499 for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) { | |
| 500 XMMRegister reg = XMMRegister::from_code(i); | |
| 501 __ movsd(reg, Operand(esp, i * kDoubleSize)); | |
| 502 } | |
| 503 __ add(esp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters)); | |
| 504 } | |
| 505 __ popad(); | 491 __ popad(); |
| 506 __ ret(0); | 492 __ ret(0); |
| 507 } | 493 } |
| 508 | 494 |
| 509 | 495 |
| 510 class FloatingPointHelper : public AllStatic { | 496 class FloatingPointHelper : public AllStatic { |
| 511 public: | 497 public: |
| 512 enum ArgLocation { | 498 enum ArgLocation { |
| 513 ARGS_ON_STACK, | 499 ARGS_ON_STACK, |
| 514 ARGS_IN_REGISTERS | 500 ARGS_IN_REGISTERS |
| 515 }; | 501 }; |
| 516 | 502 |
| 517 // Code pattern for loading a floating point value. Input value must | 503 // Code pattern for loading a floating point value. Input value must |
| 518 // be either a smi or a heap number object (fp value). Requirements: | 504 // be either a smi or a heap number object (fp value). Requirements: |
| 519 // operand in register number. Returns operand as floating point number | 505 // operand in register number. Returns operand as floating point number |
| 520 // on FPU stack. | 506 // on FPU stack. |
| 521 static void LoadFloatOperand(MacroAssembler* masm, Register number); | 507 static void LoadFloatOperand(MacroAssembler* masm, Register number); |
| 522 | 508 |
| 523 // Test if operands are smi or number objects (fp). Requirements: | 509 // Test if operands are smi or number objects (fp). Requirements: |
| 524 // operand_1 in eax, operand_2 in edx; falls through on float | 510 // operand_1 in eax, operand_2 in edx; falls through on float |
| 525 // operands, jumps to the non_float label otherwise. | 511 // operands, jumps to the non_float label otherwise. |
| 526 static void CheckFloatOperands(MacroAssembler* masm, | 512 static void CheckFloatOperands(MacroAssembler* masm, |
| 527 Label* non_float, | 513 Label* non_float, |
| 528 Register scratch); | 514 Register scratch); |
| 529 | |
| 530 // Test if operands are numbers (smi or HeapNumber objects), and load | |
| 531 // them into xmm0 and xmm1 if they are. Jump to label not_numbers if | |
| 532 // either operand is not a number. Operands are in edx and eax. | |
| 533 // Leaves operands unchanged. | |
| 534 static void LoadSSE2Operands(MacroAssembler* masm, Label* not_numbers); | |
| 535 }; | 515 }; |
| 536 | 516 |
| 537 | 517 |
| 538 void DoubleToIStub::Generate(MacroAssembler* masm) { | 518 void DoubleToIStub::Generate(MacroAssembler* masm) { |
| 539 Register input_reg = this->source(); | 519 Register input_reg = this->source(); |
| 540 Register final_result_reg = this->destination(); | 520 Register final_result_reg = this->destination(); |
| 541 ASSERT(is_truncating()); | 521 ASSERT(is_truncating()); |
| 542 | 522 |
| 543 Label check_negative, process_64_bits, done, done_no_stash; | 523 Label check_negative, process_64_bits, done, done_no_stash; |
| 544 | 524 |
| (...skipping 19 matching lines...) Expand all Loading... |
| 564 Register result_reg = final_result_reg.is(ecx) ? eax : final_result_reg; | 544 Register result_reg = final_result_reg.is(ecx) ? eax : final_result_reg; |
| 565 // Save ecx if it isn't the return register and therefore volatile, or if it | 545 // Save ecx if it isn't the return register and therefore volatile, or if it |
| 566 // is the return register, then save the temp register we use in its stead for | 546 // is the return register, then save the temp register we use in its stead for |
| 567 // the result. | 547 // the result. |
| 568 Register save_reg = final_result_reg.is(ecx) ? eax : ecx; | 548 Register save_reg = final_result_reg.is(ecx) ? eax : ecx; |
| 569 __ push(scratch1); | 549 __ push(scratch1); |
| 570 __ push(save_reg); | 550 __ push(save_reg); |
| 571 | 551 |
| 572 bool stash_exponent_copy = !input_reg.is(esp); | 552 bool stash_exponent_copy = !input_reg.is(esp); |
| 573 __ mov(scratch1, mantissa_operand); | 553 __ mov(scratch1, mantissa_operand); |
| 574 if (CpuFeatures::IsSupported(SSE3)) { | |
| 575 CpuFeatureScope scope(masm, SSE3); | |
| 576 // Load x87 register with heap number. | |
| 577 __ fld_d(mantissa_operand); | |
| 578 } | |
| 579 __ mov(ecx, exponent_operand); | 554 __ mov(ecx, exponent_operand); |
| 580 if (stash_exponent_copy) __ push(ecx); | 555 if (stash_exponent_copy) __ push(ecx); |
| 581 | 556 |
| 582 __ and_(ecx, HeapNumber::kExponentMask); | 557 __ and_(ecx, HeapNumber::kExponentMask); |
| 583 __ shr(ecx, HeapNumber::kExponentShift); | 558 __ shr(ecx, HeapNumber::kExponentShift); |
| 584 __ lea(result_reg, MemOperand(ecx, -HeapNumber::kExponentBias)); | 559 __ lea(result_reg, MemOperand(ecx, -HeapNumber::kExponentBias)); |
| 585 __ cmp(result_reg, Immediate(HeapNumber::kMantissaBits)); | 560 __ cmp(result_reg, Immediate(HeapNumber::kMantissaBits)); |
| 586 __ j(below, &process_64_bits); | 561 __ j(below, &process_64_bits); |
| 587 | 562 |
| 588 // Result is entirely in lower 32-bits of mantissa | 563 // Result is entirely in lower 32-bits of mantissa |
| 589 int delta = HeapNumber::kExponentBias + Double::kPhysicalSignificandSize; | 564 int delta = HeapNumber::kExponentBias + Double::kPhysicalSignificandSize; |
| 590 if (CpuFeatures::IsSupported(SSE3)) { | |
| 591 __ fstp(0); | |
| 592 } | |
| 593 __ sub(ecx, Immediate(delta)); | 565 __ sub(ecx, Immediate(delta)); |
| 594 __ xor_(result_reg, result_reg); | 566 __ xor_(result_reg, result_reg); |
| 595 __ cmp(ecx, Immediate(31)); | 567 __ cmp(ecx, Immediate(31)); |
| 596 __ j(above, &done); | 568 __ j(above, &done); |
| 597 __ shl_cl(scratch1); | 569 __ shl_cl(scratch1); |
| 598 __ jmp(&check_negative); | 570 __ jmp(&check_negative); |
| 599 | 571 |
| 600 __ bind(&process_64_bits); | 572 __ bind(&process_64_bits); |
| 601 if (CpuFeatures::IsSupported(SSE3)) { | 573 // Result must be extracted from shifted 32-bit mantissa |
| 602 CpuFeatureScope scope(masm, SSE3); | 574 __ sub(ecx, Immediate(delta)); |
| 603 if (stash_exponent_copy) { | 575 __ neg(ecx); |
| 604 // Already a copy of the exponent on the stack, overwrite it. | 576 if (stash_exponent_copy) { |
| 605 STATIC_ASSERT(kDoubleSize == 2 * kPointerSize); | 577 __ mov(result_reg, MemOperand(esp, 0)); |
| 606 __ sub(esp, Immediate(kDoubleSize / 2)); | |
| 607 } else { | |
| 608 // Reserve space for 64 bit answer. | |
| 609 __ sub(esp, Immediate(kDoubleSize)); // Nolint. | |
| 610 } | |
| 611 // Do conversion, which cannot fail because we checked the exponent. | |
| 612 __ fisttp_d(Operand(esp, 0)); | |
| 613 __ mov(result_reg, Operand(esp, 0)); // Load low word of answer as result | |
| 614 __ add(esp, Immediate(kDoubleSize)); | |
| 615 __ jmp(&done_no_stash); | |
| 616 } else { | 578 } else { |
| 617 // Result must be extracted from shifted 32-bit mantissa | 579 __ mov(result_reg, exponent_operand); |
| 618 __ sub(ecx, Immediate(delta)); | 580 } |
| 619 __ neg(ecx); | 581 __ and_(result_reg, |
| 620 if (stash_exponent_copy) { | 582 Immediate(static_cast<uint32_t>(Double::kSignificandMask >> 32))); |
| 621 __ mov(result_reg, MemOperand(esp, 0)); | 583 __ add(result_reg, |
| 622 } else { | 584 Immediate(static_cast<uint32_t>(Double::kHiddenBit >> 32))); |
| 623 __ mov(result_reg, exponent_operand); | 585 __ shrd(result_reg, scratch1); |
| 624 } | 586 __ shr_cl(result_reg); |
| 625 __ and_(result_reg, | 587 __ test(ecx, Immediate(32)); |
| 626 Immediate(static_cast<uint32_t>(Double::kSignificandMask >> 32))); | 588 { |
| 627 __ add(result_reg, | 589 Label skip_mov; |
| 628 Immediate(static_cast<uint32_t>(Double::kHiddenBit >> 32))); | 590 __ j(equal, &skip_mov, Label::kNear); |
| 629 __ shrd(result_reg, scratch1); | 591 __ mov(scratch1, result_reg); |
| 630 __ shr_cl(result_reg); | 592 __ bind(&skip_mov); |
| 631 __ test(ecx, Immediate(32)); | |
| 632 __ cmov(not_equal, scratch1, result_reg); | |
| 633 } | 593 } |
| 634 | 594 |
| 635 // If the double was negative, negate the integer result. | 595 // If the double was negative, negate the integer result. |
| 636 __ bind(&check_negative); | 596 __ bind(&check_negative); |
| 637 __ mov(result_reg, scratch1); | 597 __ mov(result_reg, scratch1); |
| 638 __ neg(result_reg); | 598 __ neg(result_reg); |
| 639 if (stash_exponent_copy) { | 599 if (stash_exponent_copy) { |
| 640 __ cmp(MemOperand(esp, 0), Immediate(0)); | 600 __ cmp(MemOperand(esp, 0), Immediate(0)); |
| 641 } else { | 601 } else { |
| 642 __ cmp(exponent_operand, Immediate(0)); | 602 __ cmp(exponent_operand, Immediate(0)); |
| 643 } | 603 } |
| 644 __ cmov(greater, result_reg, scratch1); | 604 { |
| 605 Label skip_mov; |
| 606 __ j(less_equal, &skip_mov, Label::kNear); |
| 607 __ mov(result_reg, scratch1); |
| 608 __ bind(&skip_mov); |
| 609 } |
| 645 | 610 |
| 646 // Restore registers | 611 // Restore registers |
| 647 __ bind(&done); | 612 __ bind(&done); |
| 648 if (stash_exponent_copy) { | 613 if (stash_exponent_copy) { |
| 649 __ add(esp, Immediate(kDoubleSize / 2)); | 614 __ add(esp, Immediate(kDoubleSize / 2)); |
| 650 } | 615 } |
| 651 __ bind(&done_no_stash); | 616 __ bind(&done_no_stash); |
| 652 if (!final_result_reg.is(result_reg)) { | 617 if (!final_result_reg.is(result_reg)) { |
| 653 ASSERT(final_result_reg.is(ecx)); | 618 ASSERT(final_result_reg.is(ecx)); |
| 654 __ mov(final_result_reg, result_reg); | 619 __ mov(final_result_reg, result_reg); |
| (...skipping 15 matching lines...) Expand all Loading... |
| 670 __ bind(&load_smi); | 635 __ bind(&load_smi); |
| 671 __ SmiUntag(number); | 636 __ SmiUntag(number); |
| 672 __ push(number); | 637 __ push(number); |
| 673 __ fild_s(Operand(esp, 0)); | 638 __ fild_s(Operand(esp, 0)); |
| 674 __ pop(number); | 639 __ pop(number); |
| 675 | 640 |
| 676 __ bind(&done); | 641 __ bind(&done); |
| 677 } | 642 } |
| 678 | 643 |
| 679 | 644 |
| 680 void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm, | |
| 681 Label* not_numbers) { | |
| 682 Label load_smi_edx, load_eax, load_smi_eax, load_float_eax, done; | |
| 683 // Load operand in edx into xmm0, or branch to not_numbers. | |
| 684 __ JumpIfSmi(edx, &load_smi_edx, Label::kNear); | |
| 685 Factory* factory = masm->isolate()->factory(); | |
| 686 __ cmp(FieldOperand(edx, HeapObject::kMapOffset), factory->heap_number_map()); | |
| 687 __ j(not_equal, not_numbers); // Argument in edx is not a number. | |
| 688 __ movsd(xmm0, FieldOperand(edx, HeapNumber::kValueOffset)); | |
| 689 __ bind(&load_eax); | |
| 690 // Load operand in eax into xmm1, or branch to not_numbers. | |
| 691 __ JumpIfSmi(eax, &load_smi_eax, Label::kNear); | |
| 692 __ cmp(FieldOperand(eax, HeapObject::kMapOffset), factory->heap_number_map()); | |
| 693 __ j(equal, &load_float_eax, Label::kNear); | |
| 694 __ jmp(not_numbers); // Argument in eax is not a number. | |
| 695 __ bind(&load_smi_edx); | |
| 696 __ SmiUntag(edx); // Untag smi before converting to float. | |
| 697 __ Cvtsi2sd(xmm0, edx); | |
| 698 __ SmiTag(edx); // Retag smi for heap number overwriting test. | |
| 699 __ jmp(&load_eax); | |
| 700 __ bind(&load_smi_eax); | |
| 701 __ SmiUntag(eax); // Untag smi before converting to float. | |
| 702 __ Cvtsi2sd(xmm1, eax); | |
| 703 __ SmiTag(eax); // Retag smi for heap number overwriting test. | |
| 704 __ jmp(&done, Label::kNear); | |
| 705 __ bind(&load_float_eax); | |
| 706 __ movsd(xmm1, FieldOperand(eax, HeapNumber::kValueOffset)); | |
| 707 __ bind(&done); | |
| 708 } | |
| 709 | |
| 710 | |
| 711 void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm, | 645 void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm, |
| 712 Label* non_float, | 646 Label* non_float, |
| 713 Register scratch) { | 647 Register scratch) { |
| 714 Label test_other, done; | 648 Label test_other, done; |
| 715 // Test if both operands are floats or smi -> scratch=k_is_float; | 649 // Test if both operands are floats or smi -> scratch=k_is_float; |
| 716 // Otherwise scratch = k_not_float. | 650 // Otherwise scratch = k_not_float. |
| 717 __ JumpIfSmi(edx, &test_other, Label::kNear); | 651 __ JumpIfSmi(edx, &test_other, Label::kNear); |
| 718 __ mov(scratch, FieldOperand(edx, HeapObject::kMapOffset)); | 652 __ mov(scratch, FieldOperand(edx, HeapObject::kMapOffset)); |
| 719 Factory* factory = masm->isolate()->factory(); | 653 Factory* factory = masm->isolate()->factory(); |
| 720 __ cmp(scratch, factory->heap_number_map()); | 654 __ cmp(scratch, factory->heap_number_map()); |
| 721 __ j(not_equal, non_float); // argument in edx is not a number -> NaN | 655 __ j(not_equal, non_float); // argument in edx is not a number -> NaN |
| 722 | 656 |
| 723 __ bind(&test_other); | 657 __ bind(&test_other); |
| 724 __ JumpIfSmi(eax, &done, Label::kNear); | 658 __ JumpIfSmi(eax, &done, Label::kNear); |
| 725 __ mov(scratch, FieldOperand(eax, HeapObject::kMapOffset)); | 659 __ mov(scratch, FieldOperand(eax, HeapObject::kMapOffset)); |
| 726 __ cmp(scratch, factory->heap_number_map()); | 660 __ cmp(scratch, factory->heap_number_map()); |
| 727 __ j(not_equal, non_float); // argument in eax is not a number -> NaN | 661 __ j(not_equal, non_float); // argument in eax is not a number -> NaN |
| 728 | 662 |
| 729 // Fall-through: Both operands are numbers. | 663 // Fall-through: Both operands are numbers. |
| 730 __ bind(&done); | 664 __ bind(&done); |
| 731 } | 665 } |
| 732 | 666 |
| 733 | 667 |
| 734 void MathPowStub::Generate(MacroAssembler* masm) { | 668 void MathPowStub::Generate(MacroAssembler* masm) { |
| 735 Factory* factory = isolate()->factory(); | 669 // No SSE2 support |
| 736 const Register exponent = eax; | 670 UNREACHABLE(); |
| 737 const Register base = edx; | |
| 738 const Register scratch = ecx; | |
| 739 const XMMRegister double_result = xmm3; | |
| 740 const XMMRegister double_base = xmm2; | |
| 741 const XMMRegister double_exponent = xmm1; | |
| 742 const XMMRegister double_scratch = xmm4; | |
| 743 | |
| 744 Label call_runtime, done, exponent_not_smi, int_exponent; | |
| 745 | |
| 746 // Save 1 in double_result - we need this several times later on. | |
| 747 __ mov(scratch, Immediate(1)); | |
| 748 __ Cvtsi2sd(double_result, scratch); | |
| 749 | |
| 750 if (exponent_type_ == ON_STACK) { | |
| 751 Label base_is_smi, unpack_exponent; | |
| 752 // The exponent and base are supplied as arguments on the stack. | |
| 753 // This can only happen if the stub is called from non-optimized code. | |
| 754 // Load input parameters from stack. | |
| 755 __ mov(base, Operand(esp, 2 * kPointerSize)); | |
| 756 __ mov(exponent, Operand(esp, 1 * kPointerSize)); | |
| 757 | |
| 758 __ JumpIfSmi(base, &base_is_smi, Label::kNear); | |
| 759 __ cmp(FieldOperand(base, HeapObject::kMapOffset), | |
| 760 factory->heap_number_map()); | |
| 761 __ j(not_equal, &call_runtime); | |
| 762 | |
| 763 __ movsd(double_base, FieldOperand(base, HeapNumber::kValueOffset)); | |
| 764 __ jmp(&unpack_exponent, Label::kNear); | |
| 765 | |
| 766 __ bind(&base_is_smi); | |
| 767 __ SmiUntag(base); | |
| 768 __ Cvtsi2sd(double_base, base); | |
| 769 | |
| 770 __ bind(&unpack_exponent); | |
| 771 __ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear); | |
| 772 __ SmiUntag(exponent); | |
| 773 __ jmp(&int_exponent); | |
| 774 | |
| 775 __ bind(&exponent_not_smi); | |
| 776 __ cmp(FieldOperand(exponent, HeapObject::kMapOffset), | |
| 777 factory->heap_number_map()); | |
| 778 __ j(not_equal, &call_runtime); | |
| 779 __ movsd(double_exponent, | |
| 780 FieldOperand(exponent, HeapNumber::kValueOffset)); | |
| 781 } else if (exponent_type_ == TAGGED) { | |
| 782 __ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear); | |
| 783 __ SmiUntag(exponent); | |
| 784 __ jmp(&int_exponent); | |
| 785 | |
| 786 __ bind(&exponent_not_smi); | |
| 787 __ movsd(double_exponent, | |
| 788 FieldOperand(exponent, HeapNumber::kValueOffset)); | |
| 789 } | |
| 790 | |
| 791 if (exponent_type_ != INTEGER) { | |
| 792 Label fast_power, try_arithmetic_simplification; | |
| 793 __ DoubleToI(exponent, double_exponent, double_scratch, | |
| 794 TREAT_MINUS_ZERO_AS_ZERO, &try_arithmetic_simplification); | |
| 795 __ jmp(&int_exponent); | |
| 796 | |
| 797 __ bind(&try_arithmetic_simplification); | |
| 798 // Skip to runtime if possibly NaN (indicated by the indefinite integer). | |
| 799 __ cvttsd2si(exponent, Operand(double_exponent)); | |
| 800 __ cmp(exponent, Immediate(0x1)); | |
| 801 __ j(overflow, &call_runtime); | |
| 802 | |
| 803 if (exponent_type_ == ON_STACK) { | |
| 804 // Detect square root case. Crankshaft detects constant +/-0.5 at | |
| 805 // compile time and uses DoMathPowHalf instead. We then skip this check | |
| 806 // for non-constant cases of +/-0.5 as these hardly occur. | |
| 807 Label continue_sqrt, continue_rsqrt, not_plus_half; | |
| 808 // Test for 0.5. | |
| 809 // Load double_scratch with 0.5. | |
| 810 __ mov(scratch, Immediate(0x3F000000u)); | |
| 811 __ movd(double_scratch, scratch); | |
| 812 __ cvtss2sd(double_scratch, double_scratch); | |
| 813 // Already ruled out NaNs for exponent. | |
| 814 __ ucomisd(double_scratch, double_exponent); | |
| 815 __ j(not_equal, ¬_plus_half, Label::kNear); | |
| 816 | |
| 817 // Calculates square root of base. Check for the special case of | |
| 818 // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13). | |
| 819 // According to IEEE-754, single-precision -Infinity has the highest | |
| 820 // 9 bits set and the lowest 23 bits cleared. | |
| 821 __ mov(scratch, 0xFF800000u); | |
| 822 __ movd(double_scratch, scratch); | |
| 823 __ cvtss2sd(double_scratch, double_scratch); | |
| 824 __ ucomisd(double_base, double_scratch); | |
| 825 // Comparing -Infinity with NaN results in "unordered", which sets the | |
| 826 // zero flag as if both were equal. However, it also sets the carry flag. | |
| 827 __ j(not_equal, &continue_sqrt, Label::kNear); | |
| 828 __ j(carry, &continue_sqrt, Label::kNear); | |
| 829 | |
| 830 // Set result to Infinity in the special case. | |
| 831 __ xorps(double_result, double_result); | |
| 832 __ subsd(double_result, double_scratch); | |
| 833 __ jmp(&done); | |
| 834 | |
| 835 __ bind(&continue_sqrt); | |
| 836 // sqrtsd returns -0 when input is -0. ECMA spec requires +0. | |
| 837 __ xorps(double_scratch, double_scratch); | |
| 838 __ addsd(double_scratch, double_base); // Convert -0 to +0. | |
| 839 __ sqrtsd(double_result, double_scratch); | |
| 840 __ jmp(&done); | |
| 841 | |
| 842 // Test for -0.5. | |
| 843 __ bind(¬_plus_half); | |
| 844 // Load double_exponent with -0.5 by subtracting 1. | |
| 845 __ subsd(double_scratch, double_result); | |
| 846 // Already ruled out NaNs for exponent. | |
| 847 __ ucomisd(double_scratch, double_exponent); | |
| 848 __ j(not_equal, &fast_power, Label::kNear); | |
| 849 | |
| 850 // Calculates reciprocal of square root of base. Check for the special | |
| 851 // case of Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13). | |
| 852 // According to IEEE-754, single-precision -Infinity has the highest | |
| 853 // 9 bits set and the lowest 23 bits cleared. | |
| 854 __ mov(scratch, 0xFF800000u); | |
| 855 __ movd(double_scratch, scratch); | |
| 856 __ cvtss2sd(double_scratch, double_scratch); | |
| 857 __ ucomisd(double_base, double_scratch); | |
| 858 // Comparing -Infinity with NaN results in "unordered", which sets the | |
| 859 // zero flag as if both were equal. However, it also sets the carry flag. | |
| 860 __ j(not_equal, &continue_rsqrt, Label::kNear); | |
| 861 __ j(carry, &continue_rsqrt, Label::kNear); | |
| 862 | |
| 863 // Set result to 0 in the special case. | |
| 864 __ xorps(double_result, double_result); | |
| 865 __ jmp(&done); | |
| 866 | |
| 867 __ bind(&continue_rsqrt); | |
| 868 // sqrtsd returns -0 when input is -0. ECMA spec requires +0. | |
| 869 __ xorps(double_exponent, double_exponent); | |
| 870 __ addsd(double_exponent, double_base); // Convert -0 to +0. | |
| 871 __ sqrtsd(double_exponent, double_exponent); | |
| 872 __ divsd(double_result, double_exponent); | |
| 873 __ jmp(&done); | |
| 874 } | |
| 875 | |
| 876 // Using FPU instructions to calculate power. | |
| 877 Label fast_power_failed; | |
| 878 __ bind(&fast_power); | |
| 879 __ fnclex(); // Clear flags to catch exceptions later. | |
| 880 // Transfer (B)ase and (E)xponent onto the FPU register stack. | |
| 881 __ sub(esp, Immediate(kDoubleSize)); | |
| 882 __ movsd(Operand(esp, 0), double_exponent); | |
| 883 __ fld_d(Operand(esp, 0)); // E | |
| 884 __ movsd(Operand(esp, 0), double_base); | |
| 885 __ fld_d(Operand(esp, 0)); // B, E | |
| 886 | |
| 887 // Exponent is in st(1) and base is in st(0) | |
| 888 // B ^ E = (2^(E * log2(B)) - 1) + 1 = (2^X - 1) + 1 for X = E * log2(B) | |
| 889 // FYL2X calculates st(1) * log2(st(0)) | |
| 890 __ fyl2x(); // X | |
| 891 __ fld(0); // X, X | |
| 892 __ frndint(); // rnd(X), X | |
| 893 __ fsub(1); // rnd(X), X-rnd(X) | |
| 894 __ fxch(1); // X - rnd(X), rnd(X) | |
| 895 // F2XM1 calculates 2^st(0) - 1 for -1 < st(0) < 1 | |
| 896 __ f2xm1(); // 2^(X-rnd(X)) - 1, rnd(X) | |
| 897 __ fld1(); // 1, 2^(X-rnd(X)) - 1, rnd(X) | |
| 898 __ faddp(1); // 2^(X-rnd(X)), rnd(X) | |
| 899 // FSCALE calculates st(0) * 2^st(1) | |
| 900 __ fscale(); // 2^X, rnd(X) | |
| 901 __ fstp(1); // 2^X | |
| 902 // Bail out to runtime in case of exceptions in the status word. | |
| 903 __ fnstsw_ax(); | |
| 904 __ test_b(eax, 0x5F); // We check for all but precision exception. | |
| 905 __ j(not_zero, &fast_power_failed, Label::kNear); | |
| 906 __ fstp_d(Operand(esp, 0)); | |
| 907 __ movsd(double_result, Operand(esp, 0)); | |
| 908 __ add(esp, Immediate(kDoubleSize)); | |
| 909 __ jmp(&done); | |
| 910 | |
| 911 __ bind(&fast_power_failed); | |
| 912 __ fninit(); | |
| 913 __ add(esp, Immediate(kDoubleSize)); | |
| 914 __ jmp(&call_runtime); | |
| 915 } | |
| 916 | |
| 917 // Calculate power with integer exponent. | |
| 918 __ bind(&int_exponent); | |
| 919 const XMMRegister double_scratch2 = double_exponent; | |
| 920 __ mov(scratch, exponent); // Back up exponent. | |
| 921 __ movsd(double_scratch, double_base); // Back up base. | |
| 922 __ movsd(double_scratch2, double_result); // Load double_exponent with 1. | |
| 923 | |
| 924 // Get absolute value of exponent. | |
| 925 Label no_neg, while_true, while_false; | |
| 926 __ test(scratch, scratch); | |
| 927 __ j(positive, &no_neg, Label::kNear); | |
| 928 __ neg(scratch); | |
| 929 __ bind(&no_neg); | |
| 930 | |
| 931 __ j(zero, &while_false, Label::kNear); | |
| 932 __ shr(scratch, 1); | |
| 933 // Above condition means CF==0 && ZF==0. This means that the | |
| 934 // bit that has been shifted out is 0 and the result is not 0. | |
| 935 __ j(above, &while_true, Label::kNear); | |
| 936 __ movsd(double_result, double_scratch); | |
| 937 __ j(zero, &while_false, Label::kNear); | |
| 938 | |
| 939 __ bind(&while_true); | |
| 940 __ shr(scratch, 1); | |
| 941 __ mulsd(double_scratch, double_scratch); | |
| 942 __ j(above, &while_true, Label::kNear); | |
| 943 __ mulsd(double_result, double_scratch); | |
| 944 __ j(not_zero, &while_true); | |
| 945 | |
| 946 __ bind(&while_false); | |
| 947 // scratch has the original value of the exponent - if the exponent is | |
| 948 // negative, return 1/result. | |
| 949 __ test(exponent, exponent); | |
| 950 __ j(positive, &done); | |
| 951 __ divsd(double_scratch2, double_result); | |
| 952 __ movsd(double_result, double_scratch2); | |
| 953 // Test whether result is zero. Bail out to check for subnormal result. | |
| 954 // Due to subnormals, x^-y == (1/x)^y does not hold in all cases. | |
| 955 __ xorps(double_scratch2, double_scratch2); | |
| 956 __ ucomisd(double_scratch2, double_result); // Result cannot be NaN. | |
| 957 // double_exponent aliased as double_scratch2 has already been overwritten | |
| 958 // and may not have contained the exponent value in the first place when the | |
| 959 // exponent is a smi. We reset it with exponent value before bailing out. | |
| 960 __ j(not_equal, &done); | |
| 961 __ Cvtsi2sd(double_exponent, exponent); | |
| 962 | |
| 963 // Returning or bailing out. | |
| 964 Counters* counters = isolate()->counters(); | |
| 965 if (exponent_type_ == ON_STACK) { | |
| 966 // The arguments are still on the stack. | |
| 967 __ bind(&call_runtime); | |
| 968 __ TailCallRuntime(Runtime::kHiddenMathPow, 2, 1); | |
| 969 | |
| 970 // The stub is called from non-optimized code, which expects the result | |
| 971 // as heap number in exponent. | |
| 972 __ bind(&done); | |
| 973 __ AllocateHeapNumber(eax, scratch, base, &call_runtime); | |
| 974 __ movsd(FieldOperand(eax, HeapNumber::kValueOffset), double_result); | |
| 975 __ IncrementCounter(counters->math_pow(), 1); | |
| 976 __ ret(2 * kPointerSize); | |
| 977 } else { | |
| 978 __ bind(&call_runtime); | |
| 979 { | |
| 980 AllowExternalCallThatCantCauseGC scope(masm); | |
| 981 __ PrepareCallCFunction(4, scratch); | |
| 982 __ movsd(Operand(esp, 0 * kDoubleSize), double_base); | |
| 983 __ movsd(Operand(esp, 1 * kDoubleSize), double_exponent); | |
| 984 __ CallCFunction( | |
| 985 ExternalReference::power_double_double_function(isolate()), 4); | |
| 986 } | |
| 987 // Return value is in st(0) on ia32. | |
| 988 // Store it into the (fixed) result register. | |
| 989 __ sub(esp, Immediate(kDoubleSize)); | |
| 990 __ fstp_d(Operand(esp, 0)); | |
| 991 __ movsd(double_result, Operand(esp, 0)); | |
| 992 __ add(esp, Immediate(kDoubleSize)); | |
| 993 | |
| 994 __ bind(&done); | |
| 995 __ IncrementCounter(counters->math_pow(), 1); | |
| 996 __ ret(0); | |
| 997 } | |
| 998 } | 671 } |
| 999 | 672 |
| 1000 | 673 |
| 1001 void FunctionPrototypeStub::Generate(MacroAssembler* masm) { | 674 void FunctionPrototypeStub::Generate(MacroAssembler* masm) { |
| 1002 // ----------- S t a t e ------------- | 675 // ----------- S t a t e ------------- |
| 1003 // -- ecx : name | 676 // -- ecx : name |
| 1004 // -- edx : receiver | 677 // -- edx : receiver |
| 1005 // -- esp[0] : return address | 678 // -- esp[0] : return address |
| 1006 // ----------------------------------- | 679 // ----------------------------------- |
| 1007 Label miss; | 680 Label miss; |
| (...skipping 753 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1761 __ SmiTag(edx); // Number of capture registers to smi. | 1434 __ SmiTag(edx); // Number of capture registers to smi. |
| 1762 __ mov(FieldOperand(ebx, RegExpImpl::kLastCaptureCountOffset), edx); | 1435 __ mov(FieldOperand(ebx, RegExpImpl::kLastCaptureCountOffset), edx); |
| 1763 __ SmiUntag(edx); // Number of capture registers back from smi. | 1436 __ SmiUntag(edx); // Number of capture registers back from smi. |
| 1764 // Store last subject and last input. | 1437 // Store last subject and last input. |
| 1765 __ mov(eax, Operand(esp, kSubjectOffset)); | 1438 __ mov(eax, Operand(esp, kSubjectOffset)); |
| 1766 __ mov(ecx, eax); | 1439 __ mov(ecx, eax); |
| 1767 __ mov(FieldOperand(ebx, RegExpImpl::kLastSubjectOffset), eax); | 1440 __ mov(FieldOperand(ebx, RegExpImpl::kLastSubjectOffset), eax); |
| 1768 __ RecordWriteField(ebx, | 1441 __ RecordWriteField(ebx, |
| 1769 RegExpImpl::kLastSubjectOffset, | 1442 RegExpImpl::kLastSubjectOffset, |
| 1770 eax, | 1443 eax, |
| 1771 edi, | 1444 edi); |
| 1772 kDontSaveFPRegs); | |
| 1773 __ mov(eax, ecx); | 1445 __ mov(eax, ecx); |
| 1774 __ mov(FieldOperand(ebx, RegExpImpl::kLastInputOffset), eax); | 1446 __ mov(FieldOperand(ebx, RegExpImpl::kLastInputOffset), eax); |
| 1775 __ RecordWriteField(ebx, | 1447 __ RecordWriteField(ebx, |
| 1776 RegExpImpl::kLastInputOffset, | 1448 RegExpImpl::kLastInputOffset, |
| 1777 eax, | 1449 eax, |
| 1778 edi, | 1450 edi); |
| 1779 kDontSaveFPRegs); | |
| 1780 | 1451 |
| 1781 // Get the static offsets vector filled by the native regexp code. | 1452 // Get the static offsets vector filled by the native regexp code. |
| 1782 ExternalReference address_of_static_offsets_vector = | 1453 ExternalReference address_of_static_offsets_vector = |
| 1783 ExternalReference::address_of_static_offsets_vector(isolate()); | 1454 ExternalReference::address_of_static_offsets_vector(isolate()); |
| 1784 __ mov(ecx, Immediate(address_of_static_offsets_vector)); | 1455 __ mov(ecx, Immediate(address_of_static_offsets_vector)); |
| 1785 | 1456 |
| 1786 // ebx: last_match_info backing store (FixedArray) | 1457 // ebx: last_match_info backing store (FixedArray) |
| 1787 // ecx: offsets vector | 1458 // ecx: offsets vector |
| 1788 // edx: number of capture registers | 1459 // edx: number of capture registers |
| 1789 Label next_capture, done; | 1460 Label next_capture, done; |
| (...skipping 249 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 2039 __ j(equal, &return_not_equal); | 1710 __ j(equal, &return_not_equal); |
| 2040 | 1711 |
| 2041 // Fall through to the general case. | 1712 // Fall through to the general case. |
| 2042 __ bind(&slow); | 1713 __ bind(&slow); |
| 2043 } | 1714 } |
| 2044 | 1715 |
| 2045 // Generate the number comparison code. | 1716 // Generate the number comparison code. |
| 2046 Label non_number_comparison; | 1717 Label non_number_comparison; |
| 2047 Label unordered; | 1718 Label unordered; |
| 2048 __ bind(&generic_heap_number_comparison); | 1719 __ bind(&generic_heap_number_comparison); |
| 1720 FloatingPointHelper::CheckFloatOperands( |
| 1721 masm, &non_number_comparison, ebx); |
| 1722 FloatingPointHelper::LoadFloatOperand(masm, eax); |
| 1723 FloatingPointHelper::LoadFloatOperand(masm, edx); |
| 1724 __ FCmp(); |
| 2049 | 1725 |
| 2050 FloatingPointHelper::LoadSSE2Operands(masm, &non_number_comparison); | |
| 2051 __ ucomisd(xmm0, xmm1); | |
| 2052 // Don't base result on EFLAGS when a NaN is involved. | 1726 // Don't base result on EFLAGS when a NaN is involved. |
| 2053 __ j(parity_even, &unordered, Label::kNear); | 1727 __ j(parity_even, &unordered, Label::kNear); |
| 2054 | 1728 |
| 2055 __ mov(eax, 0); // equal | 1729 Label below_label, above_label; |
| 2056 __ mov(ecx, Immediate(Smi::FromInt(1))); | 1730 // Return a result of -1, 0, or 1, based on EFLAGS. |
| 2057 __ cmov(above, eax, ecx); | 1731 __ j(below, &below_label, Label::kNear); |
| 2058 __ mov(ecx, Immediate(Smi::FromInt(-1))); | 1732 __ j(above, &above_label, Label::kNear); |
| 2059 __ cmov(below, eax, ecx); | 1733 |
| 1734 __ Move(eax, Immediate(0)); |
| 1735 __ ret(0); |
| 1736 |
| 1737 __ bind(&below_label); |
| 1738 __ mov(eax, Immediate(Smi::FromInt(-1))); |
| 1739 __ ret(0); |
| 1740 |
| 1741 __ bind(&above_label); |
| 1742 __ mov(eax, Immediate(Smi::FromInt(1))); |
| 2060 __ ret(0); | 1743 __ ret(0); |
| 2061 | 1744 |
| 2062 // If one of the numbers was NaN, then the result is always false. | 1745 // If one of the numbers was NaN, then the result is always false. |
| 2063 // The cc is never not-equal. | 1746 // The cc is never not-equal. |
| 2064 __ bind(&unordered); | 1747 __ bind(&unordered); |
| 2065 ASSERT(cc != not_equal); | 1748 ASSERT(cc != not_equal); |
| 2066 if (cc == less || cc == less_equal) { | 1749 if (cc == less || cc == less_equal) { |
| 2067 __ mov(eax, Immediate(Smi::FromInt(1))); | 1750 __ mov(eax, Immediate(Smi::FromInt(1))); |
| 2068 } else { | 1751 } else { |
| 2069 __ mov(eax, Immediate(Smi::FromInt(-1))); | 1752 __ mov(eax, Immediate(Smi::FromInt(-1))); |
| (...skipping 191 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 2261 __ bind(¬_array_function); | 1944 __ bind(¬_array_function); |
| 2262 } | 1945 } |
| 2263 | 1946 |
| 2264 __ mov(FieldOperand(ebx, edx, times_half_pointer_size, | 1947 __ mov(FieldOperand(ebx, edx, times_half_pointer_size, |
| 2265 FixedArray::kHeaderSize), | 1948 FixedArray::kHeaderSize), |
| 2266 edi); | 1949 edi); |
| 2267 // We won't need edx or ebx anymore, just save edi | 1950 // We won't need edx or ebx anymore, just save edi |
| 2268 __ push(edi); | 1951 __ push(edi); |
| 2269 __ push(ebx); | 1952 __ push(ebx); |
| 2270 __ push(edx); | 1953 __ push(edx); |
| 2271 __ RecordWriteArray(ebx, edi, edx, kDontSaveFPRegs, | 1954 __ RecordWriteArray(ebx, edi, edx, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK); |
| 2272 EMIT_REMEMBERED_SET, OMIT_SMI_CHECK); | |
| 2273 __ pop(edx); | 1955 __ pop(edx); |
| 2274 __ pop(ebx); | 1956 __ pop(ebx); |
| 2275 __ pop(edi); | 1957 __ pop(edi); |
| 2276 | 1958 |
| 2277 __ bind(&done); | 1959 __ bind(&done); |
| 2278 } | 1960 } |
| 2279 | 1961 |
| 2280 | 1962 |
| 2281 static void EmitContinueIfStrictOrNative(MacroAssembler* masm, Label* cont) { | 1963 static void EmitContinueIfStrictOrNative(MacroAssembler* masm, Label* cont) { |
| 2282 // Do not transform the receiver for strict mode functions. | 1964 // Do not transform the receiver for strict mode functions. |
| (...skipping 295 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 2578 StubFailureTrampolineStub::GenerateAheadOfTime(isolate); | 2260 StubFailureTrampolineStub::GenerateAheadOfTime(isolate); |
| 2579 // It is important that the store buffer overflow stubs are generated first. | 2261 // It is important that the store buffer overflow stubs are generated first. |
| 2580 ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate); | 2262 ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate); |
| 2581 CreateAllocationSiteStub::GenerateAheadOfTime(isolate); | 2263 CreateAllocationSiteStub::GenerateAheadOfTime(isolate); |
| 2582 BinaryOpICStub::GenerateAheadOfTime(isolate); | 2264 BinaryOpICStub::GenerateAheadOfTime(isolate); |
| 2583 BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate); | 2265 BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate); |
| 2584 } | 2266 } |
| 2585 | 2267 |
| 2586 | 2268 |
| 2587 void CodeStub::GenerateFPStubs(Isolate* isolate) { | 2269 void CodeStub::GenerateFPStubs(Isolate* isolate) { |
| 2588 CEntryStub save_doubles(isolate, 1, kSaveFPRegs); | 2270 // Do nothing. |
| 2589 // Stubs might already be in the snapshot, detect that and don't regenerate, | |
| 2590 // which would lead to code stub initialization state being messed up. | |
| 2591 Code* save_doubles_code; | |
| 2592 if (!save_doubles.FindCodeInCache(&save_doubles_code)) { | |
| 2593 save_doubles_code = *(save_doubles.GetCode()); | |
| 2594 } | |
| 2595 isolate->set_fp_stubs_generated(true); | |
| 2596 } | 2271 } |
| 2597 | 2272 |
| 2598 | 2273 |
| 2599 void CEntryStub::GenerateAheadOfTime(Isolate* isolate) { | 2274 void CEntryStub::GenerateAheadOfTime(Isolate* isolate) { |
| 2600 CEntryStub stub(isolate, 1, kDontSaveFPRegs); | 2275 CEntryStub stub(isolate, 1); |
| 2601 stub.GetCode(); | 2276 stub.GetCode(); |
| 2602 } | 2277 } |
| 2603 | 2278 |
| 2604 | 2279 |
| 2605 void CEntryStub::Generate(MacroAssembler* masm) { | 2280 void CEntryStub::Generate(MacroAssembler* masm) { |
| 2606 // eax: number of arguments including receiver | 2281 // eax: number of arguments including receiver |
| 2607 // ebx: pointer to C function (C callee-saved) | 2282 // ebx: pointer to C function (C callee-saved) |
| 2608 // ebp: frame pointer (restored after C call) | 2283 // ebp: frame pointer (restored after C call) |
| 2609 // esp: stack pointer (restored after C call) | 2284 // esp: stack pointer (restored after C call) |
| 2610 // esi: current context (C callee-saved) | 2285 // esi: current context (C callee-saved) |
| 2611 // edi: JS function of the caller (C callee-saved) | 2286 // edi: JS function of the caller (C callee-saved) |
| 2612 | 2287 |
| 2613 ProfileEntryHookStub::MaybeCallEntryHook(masm); | 2288 ProfileEntryHookStub::MaybeCallEntryHook(masm); |
| 2614 | 2289 |
| 2615 // Enter the exit frame that transitions from JavaScript to C++. | 2290 // Enter the exit frame that transitions from JavaScript to C++. |
| 2616 __ EnterExitFrame(save_doubles_ == kSaveFPRegs); | 2291 __ EnterExitFrame(); |
| 2617 | 2292 |
| 2618 // ebx: pointer to C function (C callee-saved) | 2293 // ebx: pointer to C function (C callee-saved) |
| 2619 // ebp: frame pointer (restored after C call) | 2294 // ebp: frame pointer (restored after C call) |
| 2620 // esp: stack pointer (restored after C call) | 2295 // esp: stack pointer (restored after C call) |
| 2621 // edi: number of arguments including receiver (C callee-saved) | 2296 // edi: number of arguments including receiver (C callee-saved) |
| 2622 // esi: pointer to the first argument (C callee-saved) | 2297 // esi: pointer to the first argument (C callee-saved) |
| 2623 | 2298 |
| 2624 // Result returned in eax, or eax+edx if result_size_ is 2. | 2299 // Result returned in eax, or eax+edx if result_size_ is 2. |
| 2625 | 2300 |
| 2626 // Check stack alignment. | 2301 // Check stack alignment. |
| (...skipping 35 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 2662 Label okay; | 2337 Label okay; |
| 2663 __ cmp(edx, Operand::StaticVariable(pending_exception_address)); | 2338 __ cmp(edx, Operand::StaticVariable(pending_exception_address)); |
| 2664 // Cannot use check here as it attempts to generate call into runtime. | 2339 // Cannot use check here as it attempts to generate call into runtime. |
| 2665 __ j(equal, &okay, Label::kNear); | 2340 __ j(equal, &okay, Label::kNear); |
| 2666 __ int3(); | 2341 __ int3(); |
| 2667 __ bind(&okay); | 2342 __ bind(&okay); |
| 2668 __ pop(edx); | 2343 __ pop(edx); |
| 2669 } | 2344 } |
| 2670 | 2345 |
| 2671 // Exit the JavaScript to C++ exit frame. | 2346 // Exit the JavaScript to C++ exit frame. |
| 2672 __ LeaveExitFrame(save_doubles_ == kSaveFPRegs); | 2347 __ LeaveExitFrame(); |
| 2673 __ ret(0); | 2348 __ ret(0); |
| 2674 | 2349 |
| 2675 // Handling of exception. | 2350 // Handling of exception. |
| 2676 __ bind(&exception_returned); | 2351 __ bind(&exception_returned); |
| 2677 | 2352 |
| 2678 // Retrieve the pending exception. | 2353 // Retrieve the pending exception. |
| 2679 __ mov(eax, Operand::StaticVariable(pending_exception_address)); | 2354 __ mov(eax, Operand::StaticVariable(pending_exception_address)); |
| 2680 | 2355 |
| 2681 // Clear the pending exception. | 2356 // Clear the pending exception. |
| 2682 __ mov(edx, Immediate(isolate()->factory()->the_hole_value())); | 2357 __ mov(edx, Immediate(isolate()->factory()->the_hole_value())); |
| (...skipping 1050 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 3733 Label unordered, maybe_undefined1, maybe_undefined2; | 3408 Label unordered, maybe_undefined1, maybe_undefined2; |
| 3734 Label miss; | 3409 Label miss; |
| 3735 | 3410 |
| 3736 if (left_ == CompareIC::SMI) { | 3411 if (left_ == CompareIC::SMI) { |
| 3737 __ JumpIfNotSmi(edx, &miss); | 3412 __ JumpIfNotSmi(edx, &miss); |
| 3738 } | 3413 } |
| 3739 if (right_ == CompareIC::SMI) { | 3414 if (right_ == CompareIC::SMI) { |
| 3740 __ JumpIfNotSmi(eax, &miss); | 3415 __ JumpIfNotSmi(eax, &miss); |
| 3741 } | 3416 } |
| 3742 | 3417 |
| 3743 // Load left and right operand. | 3418 // Inlining the double comparison and falling back to the general compare |
| 3744 Label done, left, left_smi, right_smi; | 3419 // stub if NaN is involved or SSE2 or CMOV is unsupported. |
| 3745 __ JumpIfSmi(eax, &right_smi, Label::kNear); | 3420 __ mov(ecx, edx); |
| 3421 __ and_(ecx, eax); |
| 3422 __ JumpIfSmi(ecx, &generic_stub, Label::kNear); |
| 3423 |
| 3746 __ cmp(FieldOperand(eax, HeapObject::kMapOffset), | 3424 __ cmp(FieldOperand(eax, HeapObject::kMapOffset), |
| 3747 isolate()->factory()->heap_number_map()); | 3425 isolate()->factory()->heap_number_map()); |
| 3748 __ j(not_equal, &maybe_undefined1, Label::kNear); | 3426 __ j(not_equal, &maybe_undefined1, Label::kNear); |
| 3749 __ movsd(xmm1, FieldOperand(eax, HeapNumber::kValueOffset)); | |
| 3750 __ jmp(&left, Label::kNear); | |
| 3751 __ bind(&right_smi); | |
| 3752 __ mov(ecx, eax); // Can't clobber eax because we can still jump away. | |
| 3753 __ SmiUntag(ecx); | |
| 3754 __ Cvtsi2sd(xmm1, ecx); | |
| 3755 | |
| 3756 __ bind(&left); | |
| 3757 __ JumpIfSmi(edx, &left_smi, Label::kNear); | |
| 3758 __ cmp(FieldOperand(edx, HeapObject::kMapOffset), | 3427 __ cmp(FieldOperand(edx, HeapObject::kMapOffset), |
| 3759 isolate()->factory()->heap_number_map()); | 3428 isolate()->factory()->heap_number_map()); |
| 3760 __ j(not_equal, &maybe_undefined2, Label::kNear); | 3429 __ j(not_equal, &maybe_undefined2, Label::kNear); |
| 3761 __ movsd(xmm0, FieldOperand(edx, HeapNumber::kValueOffset)); | |
| 3762 __ jmp(&done); | |
| 3763 __ bind(&left_smi); | |
| 3764 __ mov(ecx, edx); // Can't clobber edx because we can still jump away. | |
| 3765 __ SmiUntag(ecx); | |
| 3766 __ Cvtsi2sd(xmm0, ecx); | |
| 3767 | |
| 3768 __ bind(&done); | |
| 3769 // Compare operands. | |
| 3770 __ ucomisd(xmm0, xmm1); | |
| 3771 | |
| 3772 // Don't base result on EFLAGS when a NaN is involved. | |
| 3773 __ j(parity_even, &unordered, Label::kNear); | |
| 3774 | |
| 3775 // Return a result of -1, 0, or 1, based on EFLAGS. | |
| 3776 // Performing mov, because xor would destroy the flag register. | |
| 3777 __ mov(eax, 0); // equal | |
| 3778 __ mov(ecx, Immediate(Smi::FromInt(1))); | |
| 3779 __ cmov(above, eax, ecx); | |
| 3780 __ mov(ecx, Immediate(Smi::FromInt(-1))); | |
| 3781 __ cmov(below, eax, ecx); | |
| 3782 __ ret(0); | |
| 3783 | 3430 |
| 3784 __ bind(&unordered); | 3431 __ bind(&unordered); |
| 3785 __ bind(&generic_stub); | 3432 __ bind(&generic_stub); |
| 3786 ICCompareStub stub(isolate(), op_, CompareIC::GENERIC, CompareIC::GENERIC, | 3433 ICCompareStub stub(isolate(), op_, CompareIC::GENERIC, CompareIC::GENERIC, |
| 3787 CompareIC::GENERIC); | 3434 CompareIC::GENERIC); |
| 3788 __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET); | 3435 __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET); |
| 3789 | 3436 |
| 3790 __ bind(&maybe_undefined1); | 3437 __ bind(&maybe_undefined1); |
| 3791 if (Token::IsOrderedRelationalCompareOp(op_)) { | 3438 if (Token::IsOrderedRelationalCompareOp(op_)) { |
| 3792 __ cmp(eax, Immediate(isolate()->factory()->undefined_value())); | 3439 __ cmp(eax, Immediate(isolate()->factory()->undefined_value())); |
| (...skipping 469 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 4262 | 3909 |
| 4263 __ bind(¬_in_dictionary); | 3910 __ bind(¬_in_dictionary); |
| 4264 __ mov(result_, Immediate(0)); | 3911 __ mov(result_, Immediate(0)); |
| 4265 __ Drop(1); | 3912 __ Drop(1); |
| 4266 __ ret(2 * kPointerSize); | 3913 __ ret(2 * kPointerSize); |
| 4267 } | 3914 } |
| 4268 | 3915 |
| 4269 | 3916 |
| 4270 void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime( | 3917 void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime( |
| 4271 Isolate* isolate) { | 3918 Isolate* isolate) { |
| 4272 StoreBufferOverflowStub stub(isolate, kDontSaveFPRegs); | 3919 StoreBufferOverflowStub stub(isolate); |
| 4273 stub.GetCode(); | 3920 stub.GetCode(); |
| 4274 StoreBufferOverflowStub stub2(isolate, kSaveFPRegs); | |
| 4275 stub2.GetCode(); | |
| 4276 } | 3921 } |
| 4277 | 3922 |
| 4278 | 3923 |
| 4279 // Takes the input in 3 registers: address_ value_ and object_. A pointer to | 3924 // Takes the input in 3 registers: address_ value_ and object_. A pointer to |
| 4280 // the value has just been written into the object, now this stub makes sure | 3925 // the value has just been written into the object, now this stub makes sure |
| 4281 // we keep the GC informed. The word in the object where the value has been | 3926 // we keep the GC informed. The word in the object where the value has been |
| 4282 // written is in the address register. | 3927 // written is in the address register. |
| 4283 void RecordWriteStub::Generate(MacroAssembler* masm) { | 3928 void RecordWriteStub::Generate(MacroAssembler* masm) { |
| 4284 Label skip_to_incremental_noncompacting; | 3929 Label skip_to_incremental_noncompacting; |
| 4285 Label skip_to_incremental_compacting; | 3930 Label skip_to_incremental_compacting; |
| 4286 | 3931 |
| 4287 // The first two instructions are generated with labels so as to get the | 3932 // The first two instructions are generated with labels so as to get the |
| 4288 // offset fixed up correctly by the bind(Label*) call. We patch it back and | 3933 // offset fixed up correctly by the bind(Label*) call. We patch it back and |
| 4289 // forth between a compare instructions (a nop in this position) and the | 3934 // forth between a compare instructions (a nop in this position) and the |
| 4290 // real branch when we start and stop incremental heap marking. | 3935 // real branch when we start and stop incremental heap marking. |
| 4291 __ jmp(&skip_to_incremental_noncompacting, Label::kNear); | 3936 __ jmp(&skip_to_incremental_noncompacting, Label::kNear); |
| 4292 __ jmp(&skip_to_incremental_compacting, Label::kFar); | 3937 __ jmp(&skip_to_incremental_compacting, Label::kFar); |
| 4293 | 3938 |
| 4294 if (remembered_set_action_ == EMIT_REMEMBERED_SET) { | 3939 if (remembered_set_action_ == EMIT_REMEMBERED_SET) { |
| 4295 __ RememberedSetHelper(object_, | 3940 __ RememberedSetHelper(object_, |
| 4296 address_, | 3941 address_, |
| 4297 value_, | 3942 value_, |
| 4298 save_fp_regs_mode_, | |
| 4299 MacroAssembler::kReturnAtEnd); | 3943 MacroAssembler::kReturnAtEnd); |
| 4300 } else { | 3944 } else { |
| 4301 __ ret(0); | 3945 __ ret(0); |
| 4302 } | 3946 } |
| 4303 | 3947 |
| 4304 __ bind(&skip_to_incremental_noncompacting); | 3948 __ bind(&skip_to_incremental_noncompacting); |
| 4305 GenerateIncremental(masm, INCREMENTAL); | 3949 GenerateIncremental(masm, INCREMENTAL); |
| 4306 | 3950 |
| 4307 __ bind(&skip_to_incremental_compacting); | 3951 __ bind(&skip_to_incremental_compacting); |
| 4308 GenerateIncremental(masm, INCREMENTAL_COMPACTION); | 3952 GenerateIncremental(masm, INCREMENTAL_COMPACTION); |
| (...skipping 26 matching lines...) Expand all Loading... |
| 4335 // remembered set. | 3979 // remembered set. |
| 4336 CheckNeedsToInformIncrementalMarker( | 3980 CheckNeedsToInformIncrementalMarker( |
| 4337 masm, | 3981 masm, |
| 4338 kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, | 3982 kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, |
| 4339 mode); | 3983 mode); |
| 4340 InformIncrementalMarker(masm); | 3984 InformIncrementalMarker(masm); |
| 4341 regs_.Restore(masm); | 3985 regs_.Restore(masm); |
| 4342 __ RememberedSetHelper(object_, | 3986 __ RememberedSetHelper(object_, |
| 4343 address_, | 3987 address_, |
| 4344 value_, | 3988 value_, |
| 4345 save_fp_regs_mode_, | |
| 4346 MacroAssembler::kReturnAtEnd); | 3989 MacroAssembler::kReturnAtEnd); |
| 4347 | 3990 |
| 4348 __ bind(&dont_need_remembered_set); | 3991 __ bind(&dont_need_remembered_set); |
| 4349 } | 3992 } |
| 4350 | 3993 |
| 4351 CheckNeedsToInformIncrementalMarker( | 3994 CheckNeedsToInformIncrementalMarker( |
| 4352 masm, | 3995 masm, |
| 4353 kReturnOnNoNeedToInformIncrementalMarker, | 3996 kReturnOnNoNeedToInformIncrementalMarker, |
| 4354 mode); | 3997 mode); |
| 4355 InformIncrementalMarker(masm); | 3998 InformIncrementalMarker(masm); |
| 4356 regs_.Restore(masm); | 3999 regs_.Restore(masm); |
| 4357 __ ret(0); | 4000 __ ret(0); |
| 4358 } | 4001 } |
| 4359 | 4002 |
| 4360 | 4003 |
| 4361 void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) { | 4004 void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) { |
| 4362 regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_); | 4005 regs_.SaveCallerSaveRegisters(masm); |
| 4363 int argument_count = 3; | 4006 int argument_count = 3; |
| 4364 __ PrepareCallCFunction(argument_count, regs_.scratch0()); | 4007 __ PrepareCallCFunction(argument_count, regs_.scratch0()); |
| 4365 __ mov(Operand(esp, 0 * kPointerSize), regs_.object()); | 4008 __ mov(Operand(esp, 0 * kPointerSize), regs_.object()); |
| 4366 __ mov(Operand(esp, 1 * kPointerSize), regs_.address()); // Slot. | 4009 __ mov(Operand(esp, 1 * kPointerSize), regs_.address()); // Slot. |
| 4367 __ mov(Operand(esp, 2 * kPointerSize), | 4010 __ mov(Operand(esp, 2 * kPointerSize), |
| 4368 Immediate(ExternalReference::isolate_address(isolate()))); | 4011 Immediate(ExternalReference::isolate_address(isolate()))); |
| 4369 | 4012 |
| 4370 AllowExternalCallThatCantCauseGC scope(masm); | 4013 AllowExternalCallThatCantCauseGC scope(masm); |
| 4371 __ CallCFunction( | 4014 __ CallCFunction( |
| 4372 ExternalReference::incremental_marking_record_write_function(isolate()), | 4015 ExternalReference::incremental_marking_record_write_function(isolate()), |
| 4373 argument_count); | 4016 argument_count); |
| 4374 | 4017 |
| 4375 regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_); | 4018 regs_.RestoreCallerSaveRegisters(masm); |
| 4376 } | 4019 } |
| 4377 | 4020 |
| 4378 | 4021 |
| 4379 void RecordWriteStub::CheckNeedsToInformIncrementalMarker( | 4022 void RecordWriteStub::CheckNeedsToInformIncrementalMarker( |
| 4380 MacroAssembler* masm, | 4023 MacroAssembler* masm, |
| 4381 OnNoNeedToInformIncrementalMarker on_no_need, | 4024 OnNoNeedToInformIncrementalMarker on_no_need, |
| 4382 Mode mode) { | 4025 Mode mode) { |
| 4383 Label object_is_black, need_incremental, need_incremental_pop_object; | 4026 Label object_is_black, need_incremental, need_incremental_pop_object; |
| 4384 | 4027 |
| 4385 __ mov(regs_.scratch0(), Immediate(~Page::kPageAlignmentMask)); | 4028 __ mov(regs_.scratch0(), Immediate(~Page::kPageAlignmentMask)); |
| (...skipping 13 matching lines...) Expand all Loading... |
| 4399 regs_.scratch0(), | 4042 regs_.scratch0(), |
| 4400 regs_.scratch1(), | 4043 regs_.scratch1(), |
| 4401 &object_is_black, | 4044 &object_is_black, |
| 4402 Label::kNear); | 4045 Label::kNear); |
| 4403 | 4046 |
| 4404 regs_.Restore(masm); | 4047 regs_.Restore(masm); |
| 4405 if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) { | 4048 if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) { |
| 4406 __ RememberedSetHelper(object_, | 4049 __ RememberedSetHelper(object_, |
| 4407 address_, | 4050 address_, |
| 4408 value_, | 4051 value_, |
| 4409 save_fp_regs_mode_, | |
| 4410 MacroAssembler::kReturnAtEnd); | 4052 MacroAssembler::kReturnAtEnd); |
| 4411 } else { | 4053 } else { |
| 4412 __ ret(0); | 4054 __ ret(0); |
| 4413 } | 4055 } |
| 4414 | 4056 |
| 4415 __ bind(&object_is_black); | 4057 __ bind(&object_is_black); |
| 4416 | 4058 |
| 4417 // Get the value from the slot. | 4059 // Get the value from the slot. |
| 4418 __ mov(regs_.scratch0(), Operand(regs_.address(), 0)); | 4060 __ mov(regs_.scratch0(), Operand(regs_.address(), 0)); |
| 4419 | 4061 |
| (...skipping 27 matching lines...) Expand all Loading... |
| 4447 regs_.object(), // Scratch. | 4089 regs_.object(), // Scratch. |
| 4448 &need_incremental_pop_object, | 4090 &need_incremental_pop_object, |
| 4449 Label::kNear); | 4091 Label::kNear); |
| 4450 __ pop(regs_.object()); | 4092 __ pop(regs_.object()); |
| 4451 | 4093 |
| 4452 regs_.Restore(masm); | 4094 regs_.Restore(masm); |
| 4453 if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) { | 4095 if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) { |
| 4454 __ RememberedSetHelper(object_, | 4096 __ RememberedSetHelper(object_, |
| 4455 address_, | 4097 address_, |
| 4456 value_, | 4098 value_, |
| 4457 save_fp_regs_mode_, | |
| 4458 MacroAssembler::kReturnAtEnd); | 4099 MacroAssembler::kReturnAtEnd); |
| 4459 } else { | 4100 } else { |
| 4460 __ ret(0); | 4101 __ ret(0); |
| 4461 } | 4102 } |
| 4462 | 4103 |
| 4463 __ bind(&need_incremental_pop_object); | 4104 __ bind(&need_incremental_pop_object); |
| 4464 __ pop(regs_.object()); | 4105 __ pop(regs_.object()); |
| 4465 | 4106 |
| 4466 __ bind(&need_incremental); | 4107 __ bind(&need_incremental); |
| 4467 | 4108 |
| (...skipping 50 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 4518 __ jmp(&slow_elements); | 4159 __ jmp(&slow_elements); |
| 4519 | 4160 |
| 4520 // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object. | 4161 // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object. |
| 4521 __ bind(&fast_elements); | 4162 __ bind(&fast_elements); |
| 4522 __ mov(ebx, FieldOperand(ebx, JSObject::kElementsOffset)); | 4163 __ mov(ebx, FieldOperand(ebx, JSObject::kElementsOffset)); |
| 4523 __ lea(ecx, FieldOperand(ebx, ecx, times_half_pointer_size, | 4164 __ lea(ecx, FieldOperand(ebx, ecx, times_half_pointer_size, |
| 4524 FixedArrayBase::kHeaderSize)); | 4165 FixedArrayBase::kHeaderSize)); |
| 4525 __ mov(Operand(ecx, 0), eax); | 4166 __ mov(Operand(ecx, 0), eax); |
| 4526 // Update the write barrier for the array store. | 4167 // Update the write barrier for the array store. |
| 4527 __ RecordWrite(ebx, ecx, eax, | 4168 __ RecordWrite(ebx, ecx, eax, |
| 4528 kDontSaveFPRegs, | |
| 4529 EMIT_REMEMBERED_SET, | 4169 EMIT_REMEMBERED_SET, |
| 4530 OMIT_SMI_CHECK); | 4170 OMIT_SMI_CHECK); |
| 4531 __ ret(0); | 4171 __ ret(0); |
| 4532 | 4172 |
| 4533 // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS, | 4173 // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS, |
| 4534 // and value is Smi. | 4174 // and value is Smi. |
| 4535 __ bind(&smi_element); | 4175 __ bind(&smi_element); |
| 4536 __ mov(ebx, FieldOperand(ebx, JSObject::kElementsOffset)); | 4176 __ mov(ebx, FieldOperand(ebx, JSObject::kElementsOffset)); |
| 4537 __ mov(FieldOperand(ebx, ecx, times_half_pointer_size, | 4177 __ mov(FieldOperand(ebx, ecx, times_half_pointer_size, |
| 4538 FixedArrayBase::kHeaderSize), eax); | 4178 FixedArrayBase::kHeaderSize), eax); |
| 4539 __ ret(0); | 4179 __ ret(0); |
| 4540 | 4180 |
| 4541 // Array literal has ElementsKind of FAST_*_DOUBLE_ELEMENTS. | 4181 // Array literal has ElementsKind of FAST_*_DOUBLE_ELEMENTS. |
| 4542 __ bind(&double_elements); | 4182 __ bind(&double_elements); |
| 4543 | 4183 |
| 4544 __ push(edx); | 4184 __ push(edx); |
| 4545 __ mov(edx, FieldOperand(ebx, JSObject::kElementsOffset)); | 4185 __ mov(edx, FieldOperand(ebx, JSObject::kElementsOffset)); |
| 4546 __ StoreNumberToDoubleElements(eax, | 4186 __ StoreNumberToDoubleElements(eax, |
| 4547 edx, | 4187 edx, |
| 4548 ecx, | 4188 ecx, |
| 4549 edi, | 4189 edi, |
| 4550 xmm0, | 4190 &slow_elements_from_double, |
| 4551 &slow_elements_from_double); | 4191 false); |
| 4552 __ pop(edx); | 4192 __ pop(edx); |
| 4553 __ ret(0); | 4193 __ ret(0); |
| 4554 } | 4194 } |
| 4555 | 4195 |
| 4556 | 4196 |
| 4557 void StubFailureTrampolineStub::Generate(MacroAssembler* masm) { | 4197 void StubFailureTrampolineStub::Generate(MacroAssembler* masm) { |
| 4558 CEntryStub ces(isolate(), 1, kSaveFPRegs); | 4198 CEntryStub ces(isolate(), 1); |
| 4559 __ call(ces.GetCode(), RelocInfo::CODE_TARGET); | 4199 __ call(ces.GetCode(), RelocInfo::CODE_TARGET); |
| 4560 int parameter_count_offset = | 4200 int parameter_count_offset = |
| 4561 StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset; | 4201 StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset; |
| 4562 __ mov(ebx, MemOperand(ebp, parameter_count_offset)); | 4202 __ mov(ebx, MemOperand(ebp, parameter_count_offset)); |
| 4563 masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE); | 4203 masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE); |
| 4564 __ pop(ecx); | 4204 __ pop(ecx); |
| 4565 int additional_offset = function_mode_ == JS_FUNCTION_STUB_MODE | 4205 int additional_offset = function_mode_ == JS_FUNCTION_STUB_MODE |
| 4566 ? kPointerSize | 4206 ? kPointerSize |
| 4567 : 0; | 4207 : 0; |
| 4568 __ lea(esp, MemOperand(esp, ebx, times_pointer_size, additional_offset)); | 4208 __ lea(esp, MemOperand(esp, ebx, times_pointer_size, additional_offset)); |
| (...skipping 505 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 5074 kStackSpace, | 4714 kStackSpace, |
| 5075 Operand(ebp, 7 * kPointerSize), | 4715 Operand(ebp, 7 * kPointerSize), |
| 5076 NULL); | 4716 NULL); |
| 5077 } | 4717 } |
| 5078 | 4718 |
| 5079 | 4719 |
| 5080 #undef __ | 4720 #undef __ |
| 5081 | 4721 |
| 5082 } } // namespace v8::internal | 4722 } } // namespace v8::internal |
| 5083 | 4723 |
| 5084 #endif // V8_TARGET_ARCH_IA32 | 4724 #endif // V8_TARGET_ARCH_X87 |
| OLD | NEW |