| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/x64/codegen-x64.h" | 5 #include "src/x64/codegen-x64.h" |
| 6 | 6 |
| 7 #if V8_TARGET_ARCH_X64 | 7 #if V8_TARGET_ARCH_X64 |
| 8 | 8 |
| 9 #include "src/codegen.h" | 9 #include "src/codegen.h" |
| 10 #include "src/macro-assembler.h" | 10 #include "src/macro-assembler.h" |
| (...skipping 33 matching lines...) |
| 44 // xmm0: raw double input. | 44 // xmm0: raw double input. |
| 45 XMMRegister input = xmm0; | 45 XMMRegister input = xmm0; |
| 46 XMMRegister result = xmm1; | 46 XMMRegister result = xmm1; |
| 47 __ pushq(rax); | 47 __ pushq(rax); |
| 48 __ pushq(rbx); | 48 __ pushq(rbx); |
| 49 | 49 |
| 50 MathExpGenerator::EmitMathExp(&masm, input, result, xmm2, rax, rbx); | 50 MathExpGenerator::EmitMathExp(&masm, input, result, xmm2, rax, rbx); |
| 51 | 51 |
| 52 __ popq(rbx); | 52 __ popq(rbx); |
| 53 __ popq(rax); | 53 __ popq(rax); |
| 54 __ movsd(xmm0, result); | 54 __ Movsd(xmm0, result); |
| 55 __ Ret(); | 55 __ Ret(); |
| 56 | 56 |
| 57 CodeDesc desc; | 57 CodeDesc desc; |
| 58 masm.GetCode(&desc); | 58 masm.GetCode(&desc); |
| 59 DCHECK(!RelocInfo::RequiresRelocation(desc)); | 59 DCHECK(!RelocInfo::RequiresRelocation(desc)); |
| 60 | 60 |
| 61 Assembler::FlushICacheWithoutIsolate(buffer, actual_size); | 61 Assembler::FlushICacheWithoutIsolate(buffer, actual_size); |
| 62 base::OS::ProtectCode(buffer, actual_size); | 62 base::OS::ProtectCode(buffer, actual_size); |
| 63 return FUNCTION_CAST<UnaryMathFunction>(buffer); | 63 return FUNCTION_CAST<UnaryMathFunction>(buffer); |
| 64 } | 64 } |
| (...skipping 23 matching lines...) |
| 88 | 88 |
| 89 | 89 |
| 90 #ifdef _WIN64 | 90 #ifdef _WIN64 |
| 91 typedef double (*ModuloFunction)(double, double); | 91 typedef double (*ModuloFunction)(double, double); |
| 92 // Define custom fmod implementation. | 92 // Define custom fmod implementation. |
| 93 ModuloFunction CreateModuloFunction() { | 93 ModuloFunction CreateModuloFunction() { |
| 94 size_t actual_size; | 94 size_t actual_size; |
| 95 byte* buffer = static_cast<byte*>( | 95 byte* buffer = static_cast<byte*>( |
| 96 base::OS::Allocate(Assembler::kMinimalBufferSize, &actual_size, true)); | 96 base::OS::Allocate(Assembler::kMinimalBufferSize, &actual_size, true)); |
| 97 CHECK(buffer); | 97 CHECK(buffer); |
| 98 Assembler masm(NULL, buffer, static_cast<int>(actual_size)); | 98 MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size)); |
| 99 // Generated code is put into a fixed, unmovable, buffer, and not into | 99 // Generated code is put into a fixed, unmovable, buffer, and not into |
| 100 // the V8 heap. We can't, and don't, refer to any relocatable addresses | 100 // the V8 heap. We can't, and don't, refer to any relocatable addresses |
| 101 // (e.g. the JavaScript nan-object). | 101 // (e.g. the JavaScript nan-object). |
| 102 | 102 |
| 103 // Windows 64 ABI passes double arguments in xmm0, xmm1 and | 103 // Windows 64 ABI passes double arguments in xmm0, xmm1 and |
| 104 // returns result in xmm0. | 104 // returns result in xmm0. |
| 105 // Argument backing space is allocated on the stack above | 105 // Argument backing space is allocated on the stack above |
| 106 // the return address. | 106 // the return address. |
| 107 | 107 |
| 108 // Compute x mod y. | 108 // Compute x mod y. |
| 109 // Load y and x (use argument backing store as temporary storage). | 109 // Load y and x (use argument backing store as temporary storage). |
| 110 __ movsd(Operand(rsp, kRegisterSize * 2), xmm1); | 110 __ Movsd(Operand(rsp, kRegisterSize * 2), xmm1); |
| 111 __ movsd(Operand(rsp, kRegisterSize), xmm0); | 111 __ Movsd(Operand(rsp, kRegisterSize), xmm0); |
| 112 __ fld_d(Operand(rsp, kRegisterSize * 2)); | 112 __ fld_d(Operand(rsp, kRegisterSize * 2)); |
| 113 __ fld_d(Operand(rsp, kRegisterSize)); | 113 __ fld_d(Operand(rsp, kRegisterSize)); |
| 114 | 114 |
| 115 // Clear exception flags before operation. | 115 // Clear exception flags before operation. |
| 116 { | 116 { |
| 117 Label no_exceptions; | 117 Label no_exceptions; |
| 118 __ fwait(); | 118 __ fwait(); |
| 119 __ fnstsw_ax(); | 119 __ fnstsw_ax(); |
| 120 // Clear if Illegal Operand or Zero Division exceptions are set. | 120 // Clear if Illegal Operand or Zero Division exceptions are set. |
| 121 __ testb(rax, Immediate(5)); | 121 __ testb(rax, Immediate(5)); |
| (...skipping 18 matching lines...) |
| 140 Label valid_result; | 140 Label valid_result; |
| 141 Label return_result; | 141 Label return_result; |
| 142 // If Invalid Operand or Zero Division exceptions are set, | 142 // If Invalid Operand or Zero Division exceptions are set, |
| 143 // return NaN. | 143 // return NaN. |
| 144 __ testb(rax, Immediate(5)); | 144 __ testb(rax, Immediate(5)); |
| 145 __ j(zero, &valid_result); | 145 __ j(zero, &valid_result); |
| 146 __ fstp(0); // Drop result in st(0). | 146 __ fstp(0); // Drop result in st(0). |
| 147 int64_t kNaNValue = V8_INT64_C(0x7ff8000000000000); | 147 int64_t kNaNValue = V8_INT64_C(0x7ff8000000000000); |
| 148 __ movq(rcx, kNaNValue); | 148 __ movq(rcx, kNaNValue); |
| 149 __ movq(Operand(rsp, kRegisterSize), rcx); | 149 __ movq(Operand(rsp, kRegisterSize), rcx); |
| 150 __ movsd(xmm0, Operand(rsp, kRegisterSize)); | 150 __ Movsd(xmm0, Operand(rsp, kRegisterSize)); |
| 151 __ jmp(&return_result); | 151 __ jmp(&return_result); |
| 152 | 152 |
| 153 // If result is valid, return that. | 153 // If result is valid, return that. |
| 154 __ bind(&valid_result); | 154 __ bind(&valid_result); |
| 155 __ fstp_d(Operand(rsp, kRegisterSize)); | 155 __ fstp_d(Operand(rsp, kRegisterSize)); |
| 156 __ movsd(xmm0, Operand(rsp, kRegisterSize)); | 156 __ Movsd(xmm0, Operand(rsp, kRegisterSize)); |
| 157 | 157 |
| 158 // Clean up FPU stack and exceptions and return xmm0 | 158 // Clean up FPU stack and exceptions and return xmm0 |
| 159 __ bind(&return_result); | 159 __ bind(&return_result); |
| 160 __ fstp(0); // Unload y. | 160 __ fstp(0); // Unload y. |
| 161 | 161 |
| 162 Label clear_exceptions; | 162 Label clear_exceptions; |
| 163 __ testb(rax, Immediate(0x3f /* Any Exception*/)); | 163 __ testb(rax, Immediate(0x3f /* Any Exception*/)); |
| 164 __ j(not_zero, &clear_exceptions); | 164 __ j(not_zero, &clear_exceptions); |
| 165 __ ret(0); | 165 __ ret(0); |
| 166 __ bind(&clear_exceptions); | 166 __ bind(&clear_exceptions); |
| (...skipping 159 matching lines...) |
| 326 | 326 |
| 327 // Conversion loop. | 327 // Conversion loop. |
| 328 __ bind(&loop); | 328 __ bind(&loop); |
| 329 __ movp(rbx, | 329 __ movp(rbx, |
| 330 FieldOperand(r8, r9, times_pointer_size, FixedArray::kHeaderSize)); | 330 FieldOperand(r8, r9, times_pointer_size, FixedArray::kHeaderSize)); |
| 331 // r9 : current element's index | 331 // r9 : current element's index |
| 332 // rbx: current element (smi-tagged) | 332 // rbx: current element (smi-tagged) |
| 333 __ JumpIfNotSmi(rbx, &convert_hole); | 333 __ JumpIfNotSmi(rbx, &convert_hole); |
| 334 __ SmiToInteger32(rbx, rbx); | 334 __ SmiToInteger32(rbx, rbx); |
| 335 __ Cvtlsi2sd(xmm0, rbx); | 335 __ Cvtlsi2sd(xmm0, rbx); |
| 336 __ movsd(FieldOperand(r14, r9, times_8, FixedDoubleArray::kHeaderSize), | 336 __ Movsd(FieldOperand(r14, r9, times_8, FixedDoubleArray::kHeaderSize), xmm0); |
| 337 xmm0); | |
| 338 __ jmp(&entry); | 337 __ jmp(&entry); |
| 339 __ bind(&convert_hole); | 338 __ bind(&convert_hole); |
| 340 | 339 |
| 341 if (FLAG_debug_code) { | 340 if (FLAG_debug_code) { |
| 342 __ CompareRoot(rbx, Heap::kTheHoleValueRootIndex); | 341 __ CompareRoot(rbx, Heap::kTheHoleValueRootIndex); |
| 343 __ Assert(equal, kObjectFoundInSmiOnlyArray); | 342 __ Assert(equal, kObjectFoundInSmiOnlyArray); |
| 344 } | 343 } |
| 345 | 344 |
| 346 __ movq(FieldOperand(r14, r9, times_8, FixedDoubleArray::kHeaderSize), r15); | 345 __ movq(FieldOperand(r14, r9, times_8, FixedDoubleArray::kHeaderSize), r15); |
| 347 __ bind(&entry); | 346 __ bind(&entry); |
| (...skipping 249 matching lines...) |
| 597 DCHECK(!input.is(result)); | 596 DCHECK(!input.is(result)); |
| 598 DCHECK(!input.is(double_scratch)); | 597 DCHECK(!input.is(double_scratch)); |
| 599 DCHECK(!result.is(double_scratch)); | 598 DCHECK(!result.is(double_scratch)); |
| 600 DCHECK(!temp1.is(temp2)); | 599 DCHECK(!temp1.is(temp2)); |
| 601 DCHECK(ExternalReference::math_exp_constants(0).address() != NULL); | 600 DCHECK(ExternalReference::math_exp_constants(0).address() != NULL); |
| 602 DCHECK(!masm->serializer_enabled()); // External references not serializable. | 601 DCHECK(!masm->serializer_enabled()); // External references not serializable. |
| 603 | 602 |
| 604 Label done; | 603 Label done; |
| 605 | 604 |
| 606 __ Move(kScratchRegister, ExternalReference::math_exp_constants(0)); | 605 __ Move(kScratchRegister, ExternalReference::math_exp_constants(0)); |
| 607 __ movsd(double_scratch, Operand(kScratchRegister, 0 * kDoubleSize)); | 606 __ Movsd(double_scratch, Operand(kScratchRegister, 0 * kDoubleSize)); |
| 608 __ xorpd(result, result); | 607 __ xorpd(result, result); |
| 609 __ ucomisd(double_scratch, input); | 608 __ ucomisd(double_scratch, input); |
| 610 __ j(above_equal, &done); | 609 __ j(above_equal, &done); |
| 611 __ ucomisd(input, Operand(kScratchRegister, 1 * kDoubleSize)); | 610 __ ucomisd(input, Operand(kScratchRegister, 1 * kDoubleSize)); |
| 612 __ movsd(result, Operand(kScratchRegister, 2 * kDoubleSize)); | 611 __ Movsd(result, Operand(kScratchRegister, 2 * kDoubleSize)); |
| 613 __ j(above_equal, &done); | 612 __ j(above_equal, &done); |
| 614 __ movsd(double_scratch, Operand(kScratchRegister, 3 * kDoubleSize)); | 613 __ Movsd(double_scratch, Operand(kScratchRegister, 3 * kDoubleSize)); |
| 615 __ movsd(result, Operand(kScratchRegister, 4 * kDoubleSize)); | 614 __ Movsd(result, Operand(kScratchRegister, 4 * kDoubleSize)); |
| 616 __ mulsd(double_scratch, input); | 615 __ mulsd(double_scratch, input); |
| 617 __ addsd(double_scratch, result); | 616 __ addsd(double_scratch, result); |
| 618 __ movq(temp2, double_scratch); | 617 __ movq(temp2, double_scratch); |
| 619 __ subsd(double_scratch, result); | 618 __ subsd(double_scratch, result); |
| 620 __ movsd(result, Operand(kScratchRegister, 6 * kDoubleSize)); | 619 __ Movsd(result, Operand(kScratchRegister, 6 * kDoubleSize)); |
| 621 __ leaq(temp1, Operand(temp2, 0x1ff800)); | 620 __ leaq(temp1, Operand(temp2, 0x1ff800)); |
| 622 __ andq(temp2, Immediate(0x7ff)); | 621 __ andq(temp2, Immediate(0x7ff)); |
| 623 __ shrq(temp1, Immediate(11)); | 622 __ shrq(temp1, Immediate(11)); |
| 624 __ mulsd(double_scratch, Operand(kScratchRegister, 5 * kDoubleSize)); | 623 __ mulsd(double_scratch, Operand(kScratchRegister, 5 * kDoubleSize)); |
| 625 __ Move(kScratchRegister, ExternalReference::math_exp_log_table()); | 624 __ Move(kScratchRegister, ExternalReference::math_exp_log_table()); |
| 626 __ shlq(temp1, Immediate(52)); | 625 __ shlq(temp1, Immediate(52)); |
| 627 __ orq(temp1, Operand(kScratchRegister, temp2, times_8, 0)); | 626 __ orq(temp1, Operand(kScratchRegister, temp2, times_8, 0)); |
| 628 __ Move(kScratchRegister, ExternalReference::math_exp_constants(0)); | 627 __ Move(kScratchRegister, ExternalReference::math_exp_constants(0)); |
| 629 __ subsd(double_scratch, input); | 628 __ subsd(double_scratch, input); |
| 630 __ movsd(input, double_scratch); | 629 __ Movsd(input, double_scratch); |
| 631 __ subsd(result, double_scratch); | 630 __ subsd(result, double_scratch); |
| 632 __ mulsd(input, double_scratch); | 631 __ mulsd(input, double_scratch); |
| 633 __ mulsd(result, input); | 632 __ mulsd(result, input); |
| 634 __ movq(input, temp1); | 633 __ movq(input, temp1); |
| 635 __ mulsd(result, Operand(kScratchRegister, 7 * kDoubleSize)); | 634 __ mulsd(result, Operand(kScratchRegister, 7 * kDoubleSize)); |
| 636 __ subsd(result, double_scratch); | 635 __ subsd(result, double_scratch); |
| 637 __ addsd(result, Operand(kScratchRegister, 8 * kDoubleSize)); | 636 __ addsd(result, Operand(kScratchRegister, 8 * kDoubleSize)); |
| 638 __ mulsd(result, input); | 637 __ mulsd(result, input); |
| 639 | 638 |
| 640 __ bind(&done); | 639 __ bind(&done); |
| (...skipping 80 matching lines...) |
| 721 return Operand(base_reg_, argument_count_reg_, times_pointer_size, | 720 return Operand(base_reg_, argument_count_reg_, times_pointer_size, |
| 722 displacement_to_last_argument + (receiver - 1 - index) * kPointerSize); | 721 displacement_to_last_argument + (receiver - 1 - index) * kPointerSize); |
| 723 } | 722 } |
| 724 } | 723 } |
| 725 | 724 |
| 726 | 725 |
| 727 } // namespace internal | 726 } // namespace internal |
| 728 } // namespace v8 | 727 } // namespace v8 |
| 729 | 728 |
| 730 #endif // V8_TARGET_ARCH_X64 | 729 #endif // V8_TARGET_ARCH_X64 |
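
Note on the recurring movsd -> Movsd change in this diff: the capital-M Movsd is a MacroAssembler macro instruction that emits the VEX-encoded vmovsd when the host CPU supports AVX and falls back to the plain SSE2 movsd otherwise; that is also why CreateModuloFunction now constructs a MacroAssembler instead of a bare Assembler. The sketch below is a standalone illustration of that dispatch pattern under stated assumptions, not V8's actual implementation: the Assembler/MacroAssembler stand-ins and the CpuHasAvx probe are hypothetical, and __builtin_cpu_supports is a GCC/Clang builtin.

// Standalone sketch of the Movsd-style dispatch pattern (hypothetical
// stand-ins, not V8 code): pick the AVX encoding when available, else SSE2.
#include <cstdio>

// Hypothetical stand-in for a CPU-feature check such as
// CpuFeatures::IsSupported(AVX) in the real macro assembler.
static bool CpuHasAvx() {
  return __builtin_cpu_supports("avx") != 0;  // GCC/Clang builtin
}

struct Assembler {
  // Raw instruction emitters, stubbed out for illustration.
  void movsd() { std::puts("emit SSE2 movsd"); }
  void vmovsd() { std::puts("emit AVX vmovsd (VEX encoding)"); }
};

struct MacroAssembler : Assembler {
  // Capital-M Movsd: macro instruction that selects the best encoding,
  // which is the intent behind switching call sites from movsd() to Movsd().
  void Movsd() {
    if (CpuHasAvx()) {
      vmovsd();
    } else {
      movsd();
    }
  }
};

int main() {
  MacroAssembler masm;
  masm.Movsd();  // prints which encoding would be emitted on this host
}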