Chromium Code Reviews

Unified Diff: src/x64/lithium-codegen-x64.cc

Issue 153913002: A64: Synchronize with r16756. (Closed)
Base URL: https://v8.googlecode.com/svn/branches/experimental/a64
Patch Set: Created 6 years, 10 months ago
 // Copyright 2013 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 2781 matching lines...)
 
   __ bind(&skip_assignment);
 }
 
 
 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
   HObjectAccess access = instr->hydrogen()->access();
   int offset = access.offset();
 
   if (access.IsExternalMemory()) {
+    ASSERT(!access.representation().IsInteger32());
     Register result = ToRegister(instr->result());
     if (instr->object()->IsConstantOperand()) {
       ASSERT(result.is(rax));
       __ load_rax(ToExternalReference(LConstantOperand::cast(instr->object())));
     } else {
       Register object = ToRegister(instr->object());
       __ movq(result, MemOperand(object, offset));
     }
     return;
   }
 
   Register object = ToRegister(instr->object());
   if (FLAG_track_double_fields &&
       instr->hydrogen()->representation().IsDouble()) {
     XMMRegister result = ToDoubleRegister(instr->result());
     __ movsd(result, FieldOperand(object, offset));
     return;
   }
 
   Register result = ToRegister(instr->result());
   if (access.IsInobject()) {
-    __ movq(result, FieldOperand(object, offset));
+    if (access.representation().IsInteger32()) {
+      __ movl(result, FieldOperand(object, offset));
+    } else {
+      __ movq(result, FieldOperand(object, offset));
+    }
   } else {
     __ movq(result, FieldOperand(object, JSObject::kPropertiesOffset));
-    __ movq(result, FieldOperand(result, offset));
+    if (access.representation().IsInteger32()) {
+      __ movl(result, FieldOperand(result, offset));
+    } else {
+      __ movq(result, FieldOperand(result, offset));
+    }
   }
 }
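
Note on the hunk above: the new Integer32 path loads the field with movl rather than movq. On x64 a 32-bit mov zero-extends into the full 64-bit destination register, so the upper half is cleared rather than left stale; the store side of this change (DoStoreNamedField, further down) mirrors it with a 32-bit store. A minimal standalone C++ sketch of the width distinction, with a hypothetical LoadField helper standing in for the generated code:

```cpp
#include <cstdint>
#include <cstring>

// Hypothetical stand-in for the emitted load. Widening through uint32_t
// models movl's implicit zero-extension on x64; the 8-byte branch models
// movq's full-width load.
uint64_t LoadField(const void* object, int offset, bool is_integer32) {
  const char* addr = static_cast<const char*>(object) + offset;
  if (is_integer32) {
    uint32_t value;                 // movl: reads 4 bytes...
    std::memcpy(&value, addr, sizeof(value));
    return value;                   // ...then zero-extends to 64 bits
  }
  uint64_t value;                   // movq: reads the full 8 bytes
  std::memcpy(&value, addr, sizeof(value));
  return value;
}
```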
 
 
 void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
   ASSERT(ToRegister(instr->object()).is(rax));
   ASSERT(ToRegister(instr->result()).is(rax));
 
   __ Move(rcx, instr->name());
   Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
(...skipping 676 matching lines...)
     __ cvttsd2si(output_reg, input_reg);
     // Overflow is signalled with minint.
     __ cmpl(output_reg, Immediate(0x80000000));
     DeoptimizeIf(equal, instr->environment());
     __ jmp(&done, Label::kNear);
 
     // Non-zero negative reaches here.
     __ bind(&negative_sign);
     // Truncate, then compare and compensate.
     __ cvttsd2si(output_reg, input_reg);
-    __ cvtlsi2sd(xmm_scratch, output_reg);
+    __ Cvtlsi2sd(xmm_scratch, output_reg);
     __ ucomisd(input_reg, xmm_scratch);
     __ j(equal, &done, Label::kNear);
     __ subl(output_reg, Immediate(1));
     DeoptimizeIf(overflow, instr->environment());
 
     __ bind(&done);
   }
 }
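
Note on the hunk above: the only change is cvtlsi2sd -> Cvtlsi2sd, but the surrounding floor logic is worth restating. cvttsd2si truncates toward zero, which already equals floor for non-negative inputs; for a negative non-integer the truncated result is one too high, so the code converts back, compares, and subtracts one when the round trip is inexact. A standalone sketch of that strategy (plain C++, not the emitted assembly):

```cpp
#include <cstdint>
#include <limits>

// Sketch of the truncate-then-compensate floor above. An out-of-range
// conversion yields INT32_MIN ("minint"); the generated code deoptimizes
// on that value instead of returning it.
int32_t FloorViaTruncate(double x) {
  int32_t truncated = static_cast<int32_t>(x);   // cvttsd2si
  if (truncated == std::numeric_limits<int32_t>::min()) {
    return truncated;                            // deoptimize in V8
  }
  if (x >= 0.0 || static_cast<double>(truncated) == x) {
    return truncated;    // non-negative, or exact round trip
  }
  return truncated - 1;  // negative non-integer: step down to the floor
}
```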
 
 
(...skipping 28 matching lines...)
   // CVTTSD2SI rounds towards zero, we use ceil(x - (-0.5)) and then
   // compare and compensate.
   __ movq(kScratchRegister, input_reg);  // Back up input_reg.
   __ subsd(input_reg, xmm_scratch);
   __ cvttsd2si(output_reg, input_reg);
   // Catch minint due to overflow, and to prevent overflow when compensating.
   __ cmpl(output_reg, Immediate(0x80000000));
   __ RecordComment("D2I conversion overflow");
   DeoptimizeIf(equal, instr->environment());
 
-  __ cvtlsi2sd(xmm_scratch, output_reg);
+  __ Cvtlsi2sd(xmm_scratch, output_reg);
   __ ucomisd(input_reg, xmm_scratch);
   __ j(equal, &restore, Label::kNear);
   __ subl(output_reg, Immediate(1));
   // No overflow because we already ruled out minint.
   __ bind(&restore);
   __ movq(input_reg, kScratchRegister);  // Restore input_reg.
   __ jmp(&done);
 
   __ bind(&round_to_zero);
   // We return 0 for the input range [+0, 0.5[, or [-0.5, 0.5[ if
(...skipping 77 matching lines...)
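
Note on the rounding hunk before this gap: it computes round(x) by truncating x + 0.5 (emitted as a subtraction of the constant -0.5) and then compensating when truncation rounded a negative value toward zero; the dispatch that routes only negative inputs onto this path sits in the elided lines, so the sign test below stands in for it. A condensed sketch:

```cpp
#include <cstdint>
#include <limits>

// Sketch of round-half-up via truncation. As in the floor path, an
// out-of-range conversion produces INT32_MIN and V8 deoptimizes
// ("D2I conversion overflow").
int32_t RoundViaTruncate(double x) {
  double shifted = x + 0.5;                      // subsd with -0.5
  int32_t r = static_cast<int32_t>(shifted);     // cvttsd2si
  if (r == std::numeric_limits<int32_t>::min()) {
    return r;                                    // deoptimize in V8
  }
  if (shifted < 0.0 && static_cast<double>(r) != shifted) {
    r -= 1;  // compensate: truncation rounded a negative value up
  }
  return r;
}
```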
     __ CallStub(&stub);
   } else {
     ASSERT(exponent_type.IsDouble());
     MathPowStub stub(MathPowStub::DOUBLE);
     __ CallStub(&stub);
   }
 }
 
 
 void LCodeGen::DoRandom(LRandom* instr) {
-  class DeferredDoRandom V8_FINAL : public LDeferredCode {
-   public:
-    DeferredDoRandom(LCodeGen* codegen, LRandom* instr)
-        : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() V8_OVERRIDE { codegen()->DoDeferredRandom(instr_); }
-    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
-   private:
-    LRandom* instr_;
-  };
-
-  DeferredDoRandom* deferred = new(zone()) DeferredDoRandom(this, instr);
-
-  // Having marked this instruction as a call we can use any
-  // registers.
-  ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
-
-  // Choose the right register for the first argument depending on
-  // calling convention.
-#ifdef _WIN64
-  ASSERT(ToRegister(instr->global_object()).is(rcx));
-  Register global_object = rcx;
-#else
-  ASSERT(ToRegister(instr->global_object()).is(rdi));
-  Register global_object = rdi;
-#endif
-
+  // Assert that register size is twice the size of each seed.
   static const int kSeedSize = sizeof(uint32_t);
   STATIC_ASSERT(kPointerSize == 2 * kSeedSize);
 
-  __ movq(global_object,
-          FieldOperand(global_object, GlobalObject::kNativeContextOffset));
+  // Load native context
+  Register global_object = ToRegister(instr->global_object());
+  Register native_context = global_object;
+  __ movq(native_context, FieldOperand(
+      global_object, GlobalObject::kNativeContextOffset));
+
+  // Load state (FixedArray of the native context's random seeds)
   static const int kRandomSeedOffset =
       FixedArray::kHeaderSize + Context::RANDOM_SEED_INDEX * kPointerSize;
-  __ movq(rbx, FieldOperand(global_object, kRandomSeedOffset));
-  // rbx: FixedArray of the native context's random seeds
+  Register state = native_context;
+  __ movq(state, FieldOperand(native_context, kRandomSeedOffset));
 
   // Load state[0].
-  __ movl(rax, FieldOperand(rbx, ByteArray::kHeaderSize));
-  // If state[0] == 0, call runtime to initialize seeds.
-  __ testl(rax, rax);
-  __ j(zero, deferred->entry());
+  Register state0 = ToRegister(instr->scratch());
+  __ movl(state0, FieldOperand(state, ByteArray::kHeaderSize));
   // Load state[1].
-  __ movl(rcx, FieldOperand(rbx, ByteArray::kHeaderSize + kSeedSize));
+  Register state1 = ToRegister(instr->scratch2());
+  __ movl(state1, FieldOperand(state, ByteArray::kHeaderSize + kSeedSize));
 
   // state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16)
-  // Only operate on the lower 32 bit of rax.
-  __ movzxwl(rdx, rax);
-  __ imull(rdx, rdx, Immediate(18273));
-  __ shrl(rax, Immediate(16));
-  __ addl(rax, rdx);
+  Register scratch3 = ToRegister(instr->scratch3());
+  __ movzxwl(scratch3, state0);
+  __ imull(scratch3, scratch3, Immediate(18273));
+  __ shrl(state0, Immediate(16));
+  __ addl(state0, scratch3);
   // Save state[0].
-  __ movl(FieldOperand(rbx, ByteArray::kHeaderSize), rax);
+  __ movl(FieldOperand(state, ByteArray::kHeaderSize), state0);
 
   // state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16)
-  __ movzxwl(rdx, rcx);
-  __ imull(rdx, rdx, Immediate(36969));
-  __ shrl(rcx, Immediate(16));
-  __ addl(rcx, rdx);
+  __ movzxwl(scratch3, state1);
+  __ imull(scratch3, scratch3, Immediate(36969));
+  __ shrl(state1, Immediate(16));
+  __ addl(state1, scratch3);
   // Save state[1].
-  __ movl(FieldOperand(rbx, ByteArray::kHeaderSize + kSeedSize), rcx);
+  __ movl(FieldOperand(state, ByteArray::kHeaderSize + kSeedSize), state1);
 
   // Random bit pattern = (state[0] << 14) + (state[1] & 0x3FFFF)
-  __ shll(rax, Immediate(14));
-  __ andl(rcx, Immediate(0x3FFFF));
-  __ addl(rax, rcx);
+  Register random = state0;
+  __ shll(random, Immediate(14));
+  __ andl(state1, Immediate(0x3FFFF));
+  __ addl(random, state1);
 
-  __ bind(deferred->exit());
   // Convert 32 random bits in rax to 0.(32 random bits) in a double
   // by computing:
   // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20)).
-  __ movq(rcx, V8_INT64_C(0x4130000000000000),
+  XMMRegister result = ToDoubleRegister(instr->result());
+  // We use xmm0 as fixed scratch register here.
+  XMMRegister scratch4 = xmm0;
+  __ movq(scratch3, V8_INT64_C(0x4130000000000000),
           RelocInfo::NONE64);  // 1.0 x 2^20 as double
-  __ movq(xmm2, rcx);
-  __ movd(xmm1, rax);
-  __ xorps(xmm1, xmm2);
-  __ subsd(xmm1, xmm2);
+  __ movq(scratch4, scratch3);
+  __ movd(result, random);
+  __ xorps(result, scratch4);
+  __ subsd(result, scratch4);
 }
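
Note on the DoRandom rewrite above: the two-seed multiply-with-carry generator is unchanged, but it now runs on allocator-provided scratch registers instead of fixed rax/rbx/rcx/rdx, and the deferred runtime call that initialized zero seeds is gone. The arithmetic and the bits-to-double trick are easy to check in isolation; a standalone sketch, with a plain two-element seed array standing in for the ByteArray state:

```cpp
#include <cstdint>
#include <cstring>

// Standalone model of the generated sequence. 0x4130000000000000 is
// 1.0 x 2^20 as an IEEE double; XOR-ing 32 random bits into its all-zero
// low mantissa bits yields (1 + random * 2^-52) * 2^20, so subtracting
// 2^20 leaves random * 2^-32, i.e. 0.(32 random bits) in [0, 1).
double RandomSketch(uint32_t state[2]) {
  state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16);
  state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16);
  uint32_t random = (state[0] << 14) + (state[1] & 0x3FFFF);

  uint64_t bits = 0x4130000000000000ULL ^ random;  // the xorps step
  double d;
  std::memcpy(&d, &bits, sizeof(d));               // the movd/movq steps
  return d - 1048576.0;                            // subsd: minus 1.0 x 2^20
}
```

Each call returns a double in [0, 1) carrying 32 bits of randomness, matching the comment retained in the diff.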
 
 
-void LCodeGen::DoDeferredRandom(LRandom* instr) {
-  __ PrepareCallCFunction(1);
-  __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
-  __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
-  // Return value is in rax.
-}
-
-
 void LCodeGen::DoMathExp(LMathExp* instr) {
   XMMRegister input = ToDoubleRegister(instr->value());
   XMMRegister result = ToDoubleRegister(instr->result());
   Register temp1 = ToRegister(instr->temp1());
   Register temp2 = ToRegister(instr->temp2());
 
   MathExpGenerator::EmitMathExp(masm(), input, result, xmm0, temp1, temp2);
 }
 
 
(...skipping 188 matching lines...)
 }
 
 
 void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
   Representation representation = instr->representation();
 
   HObjectAccess access = instr->hydrogen()->access();
   int offset = access.offset();
 
   if (access.IsExternalMemory()) {
+    ASSERT(!access.representation().IsInteger32());
     ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
     Register value = ToRegister(instr->value());
     if (instr->object()->IsConstantOperand()) {
       ASSERT(value.is(rax));
       LConstantOperand* object = LConstantOperand::cast(instr->object());
       __ store_rax(ToExternalReference(object));
     } else {
       Register object = ToRegister(instr->object());
       __ movq(MemOperand(object, offset), value);
     }
(...skipping 57 matching lines...)
 
   Register write_register = object;
   if (!access.IsInobject()) {
     write_register = ToRegister(instr->temp());
     __ movq(write_register, FieldOperand(object, JSObject::kPropertiesOffset));
   }
 
   if (instr->value()->IsConstantOperand()) {
     LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
     if (operand_value->IsRegister()) {
-      __ movq(FieldOperand(write_register, offset),
-              ToRegister(operand_value));
+      if (access.representation().IsInteger32()) {
+        __ movl(FieldOperand(write_register, offset),
+                ToRegister(operand_value));
+      } else {
+        __ movq(FieldOperand(write_register, offset),
+                ToRegister(operand_value));
+      }
     } else {
       Handle<Object> handle_value = ToHandle(operand_value);
       ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
       __ Move(FieldOperand(write_register, offset), handle_value);
     }
   } else {
-    __ movq(FieldOperand(write_register, offset), ToRegister(instr->value()));
+    if (access.representation().IsInteger32()) {
+      __ movl(FieldOperand(write_register, offset), ToRegister(instr->value()));
+    } else {
+      __ movq(FieldOperand(write_register, offset), ToRegister(instr->value()));
+    }
   }
 
   if (instr->hydrogen()->NeedsWriteBarrier()) {
     Register value = ToRegister(instr->value());
     Register temp = access.IsInobject() ? ToRegister(instr->temp()) : object;
     // Update the write barrier for the object for in-object properties.
     __ RecordWriteField(write_register,
                         offset,
                         value,
                         temp,
(...skipping 407 matching lines...)
   __ StoreToSafepointRegisterSlot(result, rax);
 }
 
 
 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
   LOperand* input = instr->value();
   ASSERT(input->IsRegister() || input->IsStackSlot());
   LOperand* output = instr->result();
   ASSERT(output->IsDoubleRegister());
   if (input->IsRegister()) {
-    __ cvtlsi2sd(ToDoubleRegister(output), ToRegister(input));
+    __ Cvtlsi2sd(ToDoubleRegister(output), ToRegister(input));
   } else {
-    __ cvtlsi2sd(ToDoubleRegister(output), ToOperand(input));
+    __ Cvtlsi2sd(ToDoubleRegister(output), ToOperand(input));
   }
 }
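
Note: the only change in this function (and in the floor/round hunks earlier) is cvtlsi2sd -> Cvtlsi2sd. The capitalized name suggests a MacroAssembler wrapper; presumably it zeroes the destination before converting, since cvtsi2sd writes only the low 64 bits of its XMM destination and otherwise carries a false dependency on the register's previous contents. The exact body is an assumption; the shape would be:

```cpp
#include <cstdio>

// Hypothetical assembler stand-in, only so the sketch compiles and runs;
// the real interface is V8's MacroAssembler.
struct AsmSketch {
  void xorps(int d, int s) { std::printf("xorps xmm%d, xmm%d\n", d, s); }
  void cvtlsi2sd(int d, int s) { std::printf("cvtlsi2sd xmm%d, r%d\n", d, s); }
};

// Assumed expansion of the Cvtlsi2sd macro: clear the destination first
// to break the false dependency, then do the actual conversion.
void Cvtlsi2sd(AsmSketch& masm, int dst_xmm, int src_gp) {
  masm.xorps(dst_xmm, dst_xmm);
  masm.cvtlsi2sd(dst_xmm, src_gp);
}

int main() {
  AsmSketch masm;
  Cvtlsi2sd(masm, 1, 0);  // prints the assumed two-instruction sequence
  return 0;
}
```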
 
 
 void LCodeGen::DoInteger32ToSmi(LInteger32ToSmi* instr) {
   LOperand* input = instr->value();
   ASSERT(input->IsRegister());
   LOperand* output = instr->result();
   __ Integer32ToSmi(ToRegister(output), ToRegister(input));
   if (!instr->hydrogen()->value()->HasRange() ||
(...skipping 151 matching lines...)
   __ SmiToInteger32(input, input);
 }
 
 
 void LCodeGen::EmitNumberUntagD(Register input_reg,
                                 XMMRegister result_reg,
                                 bool can_convert_undefined_to_nan,
                                 bool deoptimize_on_minus_zero,
                                 LEnvironment* env,
                                 NumberUntagDMode mode) {
-  Label load_smi, done;
+  Label convert, load_smi, done;
 
   if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
     // Smi check.
     __ JumpIfSmi(input_reg, &load_smi, Label::kNear);
 
     // Heap number map check.
     __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
                    Heap::kHeapNumberMapRootIndex);
-    if (!can_convert_undefined_to_nan) {
-      DeoptimizeIf(not_equal, env);
-    } else {
-      Label heap_number, convert;
-      __ j(equal, &heap_number, Label::kNear);
-
-      // Convert undefined (and hole) to NaN. Compute NaN as 0/0.
-      __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
-      DeoptimizeIf(not_equal, env);
-
-      __ bind(&convert);
-      __ xorps(result_reg, result_reg);
-      __ divsd(result_reg, result_reg);
-      __ jmp(&done, Label::kNear);
-
-      __ bind(&heap_number);
-    }
-    // Heap number to XMM conversion.
-    __ movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
+
+    // On x64 it is safe to load at heap number offset before evaluating the map
+    // check, since all heap objects are at least two words long.
+    __ movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
+
+    if (can_convert_undefined_to_nan) {
+      __ j(not_equal, &convert);
+    } else {
+      DeoptimizeIf(not_equal, env);
+    }
+
     if (deoptimize_on_minus_zero) {
       XMMRegister xmm_scratch = xmm0;
       __ xorps(xmm_scratch, xmm_scratch);
       __ ucomisd(xmm_scratch, result_reg);
       __ j(not_equal, &done, Label::kNear);
       __ movmskpd(kScratchRegister, result_reg);
       __ testq(kScratchRegister, Immediate(1));
       DeoptimizeIf(not_zero, env);
     }
     __ jmp(&done, Label::kNear);
+
+    if (can_convert_undefined_to_nan) {
+      __ bind(&convert);
+
+      // Convert undefined (and hole) to NaN. Compute NaN as 0/0.
+      __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
+      DeoptimizeIf(not_equal, env);
+
+      __ xorps(result_reg, result_reg);
+      __ divsd(result_reg, result_reg);
+      __ jmp(&done, Label::kNear);
+    }
   } else {
     ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
   }
 
   // Smi to XMM conversion
   __ bind(&load_smi);
   __ SmiToInteger32(kScratchRegister, input_reg);
-  __ cvtlsi2sd(result_reg, kScratchRegister);
+  __ Cvtlsi2sd(result_reg, kScratchRegister);
   __ bind(&done);
 }
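
Note on the EmitNumberUntagD restructuring above: the heap number's value is now loaded eagerly, which is safe before the map check because every heap object is at least two words long, and the undefined-to-NaN conversion moves out of line into a cold block after the main path. The NaN itself is materialized as 0/0 via xorps + divsd. A control-flow sketch of the tagged case, with plain booleans standing in for the map and root checks:

```cpp
#include <cassert>

// Sketch of the restructured untagging path (hypothetical predicates in
// place of the map/root checks). Under IEEE-754 semantics 0.0 / 0.0 is a
// quiet NaN, which is exactly what the xorps + divsd pair computes.
double NumberUntagDSketch(bool is_heap_number, bool is_undefined,
                          bool can_convert_undefined_to_nan,
                          double heap_number_value) {
  if (is_heap_number) {
    return heap_number_value;  // common case; the value was loaded eagerly
  }
  // Cold path, reached only when the map check fails.
  assert(can_convert_undefined_to_nan && is_undefined);  // else: deoptimize
  double zero = 0.0;
  return zero / zero;          // NaN as 0/0
}
```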
 
 
 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) {
   Label heap_number;
   Register input_reg = ToRegister(instr->value());
 
 
   if (instr->truncating()) {
(...skipping 32 matching lines...)
       codegen()->DoDeferredTaggedToI(instr_, done());
     }
     virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
    private:
     LTaggedToI* instr_;
   };
 
   LOperand* input = instr->value();
   ASSERT(input->IsRegister());
   ASSERT(input->Equals(instr->result()));
+  Register input_reg = ToRegister(input);
 
-  Register input_reg = ToRegister(input);
-  DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
-  __ JumpIfNotSmi(input_reg, deferred->entry());
-  __ SmiToInteger32(input_reg, input_reg);
-  __ bind(deferred->exit());
+  if (instr->hydrogen()->value()->representation().IsSmi()) {
+    __ SmiToInteger32(input_reg, input_reg);
+  } else {
+    DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
+    __ JumpIfNotSmi(input_reg, deferred->entry());
+    __ SmiToInteger32(input_reg, input_reg);
+    __ bind(deferred->exit());
+  }
 }
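
Note on the DoTaggedToI change above: when Hydrogen already knows the value is a Smi, the Smi check and the deferred heap-number path are dead code, so untagging collapses to a single shift. A sketch of x64 Smi untagging, under the assumption (matching V8's x64 port of this era) that a Smi keeps its 32-bit payload in the upper word with a zero tag bit:

```cpp
#include <cstdint>

// Assumed x64 Smi layout: payload in bits 63..32, tag bit 0 clear.
bool IsSmi(intptr_t tagged) {
  return (tagged & 1) == 0;
}

// SmiToInteger32: an arithmetic right shift by 32 recovers the payload,
// for negative payloads as well.
int32_t SmiToInteger32(intptr_t tagged) {
  return static_cast<int32_t>(tagged >> 32);
}
```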
 
 
 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
   LOperand* input = instr->value();
   ASSERT(input->IsRegister());
   LOperand* result = instr->result();
   ASSERT(result->IsDoubleRegister());
 
   Register input_reg = ToRegister(input);
(...skipping 753 matching lines...)
                  FixedArray::kHeaderSize - kPointerSize));
   __ bind(&done);
 }
 
 
 #undef __
 
 } }  // namespace v8::internal
 
 #endif  // V8_TARGET_ARCH_X64