| OLD | NEW |
| 1 // Copyright 2013 the V8 project authors. All rights reserved. | 1 // Copyright 2013 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 139 matching lines...) |
| 150 // Reserve space for the stack slots needed by the code. | 150 // Reserve space for the stack slots needed by the code. |
| 151 int slots = GetStackSlotCount(); | 151 int slots = GetStackSlotCount(); |
| 152 if (slots > 0) { | 152 if (slots > 0) { |
| 153 if (FLAG_debug_code) { | 153 if (FLAG_debug_code) { |
| 154 __ subq(rsp, Immediate(slots * kPointerSize)); | 154 __ subq(rsp, Immediate(slots * kPointerSize)); |
| 155 #ifdef _MSC_VER | 155 #ifdef _MSC_VER |
| 156 MakeSureStackPagesMapped(slots * kPointerSize); | 156 MakeSureStackPagesMapped(slots * kPointerSize); |
| 157 #endif | 157 #endif |
| 158 __ push(rax); | 158 __ push(rax); |
| 159 __ Set(rax, slots); | 159 __ Set(rax, slots); |
| 160 __ movq(kScratchRegister, kSlotsZapValue, RelocInfo::NONE64); | 160 __ movq(kScratchRegister, kSlotsZapValue); |
| 161 Label loop; | 161 Label loop; |
| 162 __ bind(&loop); | 162 __ bind(&loop); |
| 163 __ movq(MemOperand(rsp, rax, times_pointer_size, 0), | 163 __ movq(MemOperand(rsp, rax, times_pointer_size, 0), |
| 164 kScratchRegister); | 164 kScratchRegister); |
| 165 __ decl(rax); | 165 __ decl(rax); |
| 166 __ j(not_zero, &loop); | 166 __ j(not_zero, &loop); |
| 167 __ pop(rax); | 167 __ pop(rax); |
| 168 } else { | 168 } else { |
| 169 __ subq(rsp, Immediate(slots * kPointerSize)); | 169 __ subq(rsp, Immediate(slots * kPointerSize)); |
| 170 #ifdef _MSC_VER | 170 #ifdef _MSC_VER |
| (...skipping 945 matching lines...) |
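A note on the prologue hunk above: when FLAG_debug_code is set, the freshly reserved stack slots are filled with kSlotsZapValue so that a read of an uninitialized slot shows up as an obvious garbage pattern, and the MSVC-only MakeSureStackPagesMapped call touches the new stack pages in order (Windows commits stack pages only as the guard page is hit, so a large rsp adjustment must not skip past it). A rough C-level equivalent of the emitted fill loop, with an illustrative sentinel standing in for V8's actual kSlotsZapValue:

```cpp
#include <cstddef>
#include <cstdint>

// Illustrative stand-in for V8's kSlotsZapValue; the real constant differs.
constexpr uint64_t kExampleZapValue = 0xBEEFDEADBEEFDEEDull;

// Roughly what the rax-indexed movq loop above does: write the zap value
// into every reserved pointer-sized slot. (The assembly counts rax down
// from `count` to 1 rather than iterating forward.)
void ZapReservedSlots(uint64_t* slots, size_t count) {
  for (size_t i = 0; i < count; ++i) {
    slots[i] = kExampleZapValue;
  }
}
```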
| 1116 // The multiplier is a uint32. | 1116 // The multiplier is a uint32. |
| 1117 ASSERT(multiplier > 0 && | 1117 ASSERT(multiplier > 0 && |
| 1118 multiplier < (static_cast<int64_t>(1) << 32)); | 1118 multiplier < (static_cast<int64_t>(1) << 32)); |
| 1119 // The multiply is int64, so sign-extend to r64. | 1119 // The multiply is int64, so sign-extend to r64. |
| 1120 __ movsxlq(reg1, dividend); | 1120 __ movsxlq(reg1, dividend); |
| 1121 if (divisor < 0 && | 1121 if (divisor < 0 && |
| 1122 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1122 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 1123 __ neg(reg1); | 1123 __ neg(reg1); |
| 1124 DeoptimizeIf(zero, instr->environment()); | 1124 DeoptimizeIf(zero, instr->environment()); |
| 1125 } | 1125 } |
| 1126 __ movq(reg2, multiplier, RelocInfo::NONE64); | 1126 __ Set(reg2, multiplier); |
| 1127 // Result just fits in r64, because it's int32 * uint32. | 1127 // Result just fits in r64, because it's int32 * uint32. |
| 1128 __ imul(reg2, reg1); | 1128 __ imul(reg2, reg1); |
| 1129 | 1129 |
| 1130 __ addq(reg2, Immediate(1 << 30)); | 1130 __ addq(reg2, Immediate(1 << 30)); |
| 1131 __ sar(reg2, Immediate(shift)); | 1131 __ sar(reg2, Immediate(shift)); |
| 1132 } | 1132 } |
| 1133 } | 1133 } |
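A note on the constant-divisor hunk above: instead of issuing idiv, the generated code multiplies the sign-extended dividend by a precomputed fixed-point reciprocal of the divisor and shifts the 64-bit product down. The exact multiplier/shift pair and the 1 << 30 rounding bias are computed in V8 code that is elided here. The sketch below only illustrates the general reciprocal-multiplication technique, using the classic magic pair for a hypothetical unsigned divisor of 5 (multiplier 0xCCCCCCCD, shift 34), not the constants V8 derives for this instruction:

```cpp
#include <cassert>
#include <cstdint>

// Sketch of division by a constant via multiply-and-shift.
// 0xCCCCCCCD / 2^34 is slightly above 1/5, chosen so the truncation error
// never reaches the next integer for any 32-bit input.
uint32_t DivideBy5(uint32_t x) {
  const uint64_t kMagic = 0xCCCCCCCDull;
  return static_cast<uint32_t>((x * kMagic) >> 34);
}

int main() {
  for (uint32_t x = 0; x < 1000000; ++x) {
    assert(DivideBy5(x) == x / 5);
  }
  return 0;
}
```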
| 1134 | 1134 |
| 1135 | 1135 |
| 1136 void LCodeGen::DoDivI(LDivI* instr) { | 1136 void LCodeGen::DoDivI(LDivI* instr) { |
| (...skipping 2309 matching lines...) |
| 3446 | 3446 |
| 3447 | 3447 |
| 3448 void LCodeGen::DoMathRound(LMathRound* instr) { | 3448 void LCodeGen::DoMathRound(LMathRound* instr) { |
| 3449 const XMMRegister xmm_scratch = double_scratch0(); | 3449 const XMMRegister xmm_scratch = double_scratch0(); |
| 3450 Register output_reg = ToRegister(instr->result()); | 3450 Register output_reg = ToRegister(instr->result()); |
| 3451 XMMRegister input_reg = ToDoubleRegister(instr->value()); | 3451 XMMRegister input_reg = ToDoubleRegister(instr->value()); |
| 3452 static int64_t one_half = V8_INT64_C(0x3FE0000000000000); // 0.5 | 3452 static int64_t one_half = V8_INT64_C(0x3FE0000000000000); // 0.5 |
| 3453 static int64_t minus_one_half = V8_INT64_C(0xBFE0000000000000); // -0.5 | 3453 static int64_t minus_one_half = V8_INT64_C(0xBFE0000000000000); // -0.5 |
| 3454 | 3454 |
| 3455 Label done, round_to_zero, below_one_half, do_not_compensate, restore; | 3455 Label done, round_to_zero, below_one_half, do_not_compensate, restore; |
| 3456 __ movq(kScratchRegister, one_half, RelocInfo::NONE64); | 3456 __ movq(kScratchRegister, one_half); |
| 3457 __ movq(xmm_scratch, kScratchRegister); | 3457 __ movq(xmm_scratch, kScratchRegister); |
| 3458 __ ucomisd(xmm_scratch, input_reg); | 3458 __ ucomisd(xmm_scratch, input_reg); |
| 3459 __ j(above, &below_one_half); | 3459 __ j(above, &below_one_half); |
| 3460 | 3460 |
| 3461 // CVTTSD2SI rounds towards zero; since 0.5 <= x, we use floor(0.5 + x). | 3461 // CVTTSD2SI rounds towards zero; since 0.5 <= x, we use floor(0.5 + x). |
| 3462 __ addsd(xmm_scratch, input_reg); | 3462 __ addsd(xmm_scratch, input_reg); |
| 3463 __ cvttsd2si(output_reg, xmm_scratch); | 3463 __ cvttsd2si(output_reg, xmm_scratch); |
| 3464 // Overflow is signalled with minint. | 3464 // Overflow is signalled with minint. |
| 3465 __ cmpl(output_reg, Immediate(0x80000000)); | 3465 __ cmpl(output_reg, Immediate(0x80000000)); |
| 3466 __ RecordComment("D2I conversion overflow"); | 3466 __ RecordComment("D2I conversion overflow"); |
| 3467 DeoptimizeIf(equal, instr->environment()); | 3467 DeoptimizeIf(equal, instr->environment()); |
| 3468 __ jmp(&done); | 3468 __ jmp(&done); |
| 3469 | 3469 |
| 3470 __ bind(&below_one_half); | 3470 __ bind(&below_one_half); |
| 3471 __ movq(kScratchRegister, minus_one_half, RelocInfo::NONE64); | 3471 __ movq(kScratchRegister, minus_one_half); |
| 3472 __ movq(xmm_scratch, kScratchRegister); | 3472 __ movq(xmm_scratch, kScratchRegister); |
| 3473 __ ucomisd(xmm_scratch, input_reg); | 3473 __ ucomisd(xmm_scratch, input_reg); |
| 3474 __ j(below_equal, &round_to_zero); | 3474 __ j(below_equal, &round_to_zero); |
| 3475 | 3475 |
| 3476 // CVTTSD2SI rounds towards zero; we use ceil(x - (-0.5)) and then | 3476 // CVTTSD2SI rounds towards zero; we use ceil(x - (-0.5)) and then |
| 3477 // compare and compensate. | 3477 // compare and compensate. |
| 3478 __ movq(kScratchRegister, input_reg); // Back up input_reg. | 3478 __ movq(kScratchRegister, input_reg); // Back up input_reg. |
| 3479 __ subsd(input_reg, xmm_scratch); | 3479 __ subsd(input_reg, xmm_scratch); |
| 3480 __ cvttsd2si(output_reg, input_reg); | 3480 __ cvttsd2si(output_reg, input_reg); |
| 3481 // Catch minint due to overflow, and to prevent overflow when compensating. | 3481 // Catch minint due to overflow, and to prevent overflow when compensating. |
| (...skipping 35 matching lines...) |
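A note on the DoMathRound hunk above: CVTTSD2SI only truncates, so round-half-up is synthesized from it. For 0.5 <= x the code truncates x + 0.5 (truncation equals floor for non-negative values); for -0.5 <= x < 0.5 it rounds to zero; for x < -0.5 it truncates x - (-0.5), which rounds the negative sum toward zero, i.e. up, and then compensates in the elided lines. A scalar C++ sketch of the same case split (the minint-overflow and -0 deopt paths of the real code are omitted):

```cpp
#include <cassert>
#include <cstdint>

// Sketch of the DoMathRound case split above, with plain truncation
// standing in for CVTTSD2SI.
int32_t RoundHalfUp(double x) {
  if (x >= 0.5) {
    // Truncation equals floor for non-negative values: floor(x + 0.5).
    return static_cast<int32_t>(x + 0.5);
  }
  if (x >= -0.5) {
    // -0.5 <= x < 0.5: round to zero (the real code deopts here on -0
    // when kBailoutOnMinusZero is set).
    return 0;
  }
  // x < -0.5: truncating the negative value x - (-0.5) rounds it up
  // (toward zero), so compensate when the sum was not already an integer.
  double sum = x - (-0.5);
  int32_t result = static_cast<int32_t>(sum);
  if (static_cast<double>(result) != sum) result -= 1;
  return result;
}

int main() {
  assert(RoundHalfUp(2.5) == 3);
  assert(RoundHalfUp(-2.5) == -2);  // ties round toward +infinity
  assert(RoundHalfUp(-2.6) == -3);
  assert(RoundHalfUp(0.25) == 0);
  return 0;
}
```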
| 3517 XMMRegister xmm_scratch = double_scratch0(); | 3517 XMMRegister xmm_scratch = double_scratch0(); |
| 3518 XMMRegister input_reg = ToDoubleRegister(instr->value()); | 3518 XMMRegister input_reg = ToDoubleRegister(instr->value()); |
| 3519 ASSERT(ToDoubleRegister(instr->result()).is(input_reg)); | 3519 ASSERT(ToDoubleRegister(instr->result()).is(input_reg)); |
| 3520 | 3520 |
| 3521 // Note that according to ECMA-262 15.8.2.13: | 3521 // Note that according to ECMA-262 15.8.2.13: |
| 3522 // Math.pow(-Infinity, 0.5) == Infinity | 3522 // Math.pow(-Infinity, 0.5) == Infinity |
| 3523 // Math.sqrt(-Infinity) == NaN | 3523 // Math.sqrt(-Infinity) == NaN |
| 3524 Label done, sqrt; | 3524 Label done, sqrt; |
| 3525 // Check base for -Infinity. According to IEEE-754, double-precision | 3525 // Check base for -Infinity. According to IEEE-754, double-precision |
| 3526 // -Infinity has the highest 12 bits set and the lowest 52 bits cleared. | 3526 // -Infinity has the highest 12 bits set and the lowest 52 bits cleared. |
| 3527 __ movq(kScratchRegister, V8_INT64_C(0xFFF0000000000000), RelocInfo::NONE64); | 3527 __ movq(kScratchRegister, V8_INT64_C(0xFFF0000000000000)); |
| 3528 __ movq(xmm_scratch, kScratchRegister); | 3528 __ movq(xmm_scratch, kScratchRegister); |
| 3529 __ ucomisd(xmm_scratch, input_reg); | 3529 __ ucomisd(xmm_scratch, input_reg); |
| 3530 // Comparing -Infinity with NaN results in "unordered", which sets the | 3530 // Comparing -Infinity with NaN results in "unordered", which sets the |
| 3531 // zero flag as if both were equal. However, it also sets the carry flag. | 3531 // zero flag as if both were equal. However, it also sets the carry flag. |
| 3532 __ j(not_equal, &sqrt, Label::kNear); | 3532 __ j(not_equal, &sqrt, Label::kNear); |
| 3533 __ j(carry, &sqrt, Label::kNear); | 3533 __ j(carry, &sqrt, Label::kNear); |
| 3534 // If input is -Infinity, return Infinity. | 3534 // If input is -Infinity, return Infinity. |
| 3535 __ xorps(input_reg, input_reg); | 3535 __ xorps(input_reg, input_reg); |
| 3536 __ subsd(input_reg, xmm_scratch); | 3536 __ subsd(input_reg, xmm_scratch); |
| 3537 __ jmp(&done, Label::kNear); | 3537 __ jmp(&done, Label::kNear); |
| (...skipping 87 matching lines...) |
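A note on the Math.pow(x, 0.5) hunk above: ES5 15.8.2.13 requires Math.pow(-Infinity, 0.5) to be +Infinity, while sqrtsd(-Infinity) would produce NaN, so the code compares the input against the exact bit pattern of -Infinity (the top 12 bits, sign plus exponent, set; the 52 mantissa bits clear) and returns +Infinity for that one value. A plain C++ sketch of the same special case; PowHalf here is an illustrative standalone helper, not V8's API:

```cpp
#include <cassert>
#include <cmath>
#include <cstdint>
#include <cstring>
#include <limits>

// Sketch of Math.pow(x, 0.5) with the ES5-mandated -Infinity special case.
// 0xFFF0000000000000 is the IEEE-754 bit pattern of -Infinity: the top
// 12 bits (sign plus 11 exponent bits) set, the 52 mantissa bits clear.
double PowHalf(double x) {
  uint64_t bits;
  std::memcpy(&bits, &x, sizeof(bits));
  if (bits == 0xFFF0000000000000ull) {
    return std::numeric_limits<double>::infinity();
  }
  // Adding +0.0 maps -0.0 to +0.0, so the result matches
  // Math.pow(-0, 0.5) == +0.
  return std::sqrt(x + 0.0);
}

int main() {
  const double inf = std::numeric_limits<double>::infinity();
  assert(PowHalf(-inf) == inf);
  assert(PowHalf(4.0) == 2.0);
  return 0;
}
```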
| 3625 Register random = state0; | 3625 Register random = state0; |
| 3626 __ shll(random, Immediate(14)); | 3626 __ shll(random, Immediate(14)); |
| 3627 __ andl(state1, Immediate(0x3FFFF)); | 3627 __ andl(state1, Immediate(0x3FFFF)); |
| 3628 __ addl(random, state1); | 3628 __ addl(random, state1); |
| 3629 | 3629 |
| 3630 // Convert 32 random bits in rax to 0.(32 random bits) in a double | 3630 // Convert 32 random bits in rax to 0.(32 random bits) in a double |
| 3631 // by computing: | 3631 // by computing: |
| 3632 // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20). | 3632 // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20). |
| 3633 XMMRegister result = ToDoubleRegister(instr->result()); | 3633 XMMRegister result = ToDoubleRegister(instr->result()); |
| 3634 XMMRegister scratch4 = double_scratch0(); | 3634 XMMRegister scratch4 = double_scratch0(); |
| 3635 __ movq(scratch3, V8_INT64_C(0x4130000000000000), | 3635 __ movq(scratch3, V8_INT64_C(0x4130000000000000)); // 1.0 x 2^20 as double |
| 3636 RelocInfo::NONE64); // 1.0 x 2^20 as double | |
| 3637 __ movq(scratch4, scratch3); | 3636 __ movq(scratch4, scratch3); |
| 3638 __ movd(result, random); | 3637 __ movd(result, random); |
| 3639 __ xorps(result, scratch4); | 3638 __ xorps(result, scratch4); |
| 3640 __ subsd(result, scratch4); | 3639 __ subsd(result, scratch4); |
| 3641 } | 3640 } |
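A note on the random-number hunk above: the 32 fresh random bits become a double in [0, 1) without an int-to-double conversion. Placing them in the low mantissa bits of the pattern 0x4130000000000000 (1.0 x 2^20) produces the value 2^20 + bits * 2^-32, and subtracting 2^20 leaves exactly bits * 2^-32. A bit-level C++ sketch of that identity (the xorps in the generated code acts as the OR here, since the unused bits of both operands are zero):

```cpp
#include <cassert>
#include <cstdint>
#include <cstring>

// Sketch of the bit trick above: build the double 1.(20 zero bits)(32
// random bits) x 2^20 and subtract 2^20, leaving bits * 2^-32 in [0, 1).
double RandomBitsToDouble(uint32_t random_bits) {
  const uint64_t kTwoPow20Bits = 0x4130000000000000ull;  // 1.0 x 2^20
  uint64_t bits = kTwoPow20Bits | random_bits;
  double combined;
  std::memcpy(&combined, &bits, sizeof(combined));
  double two_pow_20;
  std::memcpy(&two_pow_20, &kTwoPow20Bits, sizeof(two_pow_20));
  return combined - two_pow_20;  // == random_bits * 2^-32, exactly
}

int main() {
  assert(RandomBitsToDouble(0) == 0.0);
  assert(RandomBitsToDouble(0x80000000u) == 0.5);
  assert(RandomBitsToDouble(0xFFFFFFFFu) < 1.0);
  return 0;
}
```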
| 3642 | 3641 |
| 3643 | 3642 |
| 3644 void LCodeGen::DoMathExp(LMathExp* instr) { | 3643 void LCodeGen::DoMathExp(LMathExp* instr) { |
| 3645 XMMRegister input = ToDoubleRegister(instr->value()); | 3644 XMMRegister input = ToDoubleRegister(instr->value()); |
| 3646 XMMRegister result = ToDoubleRegister(instr->result()); | 3645 XMMRegister result = ToDoubleRegister(instr->result()); |
| (...skipping 1808 matching lines...) |
| 5455 FixedArray::kHeaderSize - kPointerSize)); | 5454 FixedArray::kHeaderSize - kPointerSize)); |
| 5456 __ bind(&done); | 5455 __ bind(&done); |
| 5457 } | 5456 } |
| 5458 | 5457 |
| 5459 | 5458 |
| 5460 #undef __ | 5459 #undef __ |
| 5461 | 5460 |
| 5462 } } // namespace v8::internal | 5461 } } // namespace v8::internal |
| 5463 | 5462 |
| 5464 #endif // V8_TARGET_ARCH_X64 | 5463 #endif // V8_TARGET_ARCH_X64 |