OLD | NEW |
1 // Copyright 2010 the V8 project authors. All rights reserved. | 1 // Copyright 2010 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 259 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
270 | 270 |
271 // Test if operands are smi or number objects (fp). Requirements: | 271 // Test if operands are smi or number objects (fp). Requirements: |
272 // operand_1 in rax, operand_2 in rdx; falls through on float or smi | 272 // operand_1 in rax, operand_2 in rdx; falls through on float or smi |
273 // operands, jumps to the non_float label otherwise. | 273 // operands, jumps to the non_float label otherwise. |
274 static void CheckNumberOperands(MacroAssembler* masm, | 274 static void CheckNumberOperands(MacroAssembler* masm, |
275 Label* non_float); | 275 Label* non_float); |
276 | 276 |
277 // Takes the operands in rdx and rax and loads them as integers in rax | 277 // Takes the operands in rdx and rax and loads them as integers in rax |
278 // and rcx. | 278 // and rcx. |
279 static void LoadAsIntegers(MacroAssembler* masm, | 279 static void LoadAsIntegers(MacroAssembler* masm, |
280 bool use_sse3, | |
281 Label* operand_conversion_failure); | 280 Label* operand_conversion_failure); |
282 }; | 281 }; |
283 | 282 |
284 | 283 |
285 // ----------------------------------------------------------------------------- | 284 // ----------------------------------------------------------------------------- |
286 // CodeGenerator implementation. | 285 // CodeGenerator implementation. |
287 | 286 |
288 CodeGenerator::CodeGenerator(MacroAssembler* masm) | 287 CodeGenerator::CodeGenerator(MacroAssembler* masm) |
289 : deferred_(8), | 288 : deferred_(8), |
290 masm_(masm), | 289 masm_(masm), |
(...skipping 7596 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
7887 case TranscendentalCache::COS: | 7886 case TranscendentalCache::COS: |
7888 __ fcos(); | 7887 __ fcos(); |
7889 break; | 7888 break; |
7890 default: | 7889 default: |
7891 UNREACHABLE(); | 7890 UNREACHABLE(); |
7892 } | 7891 } |
7893 __ bind(&done); | 7892 __ bind(&done); |
7894 } | 7893 } |
7895 | 7894 |
7896 | 7895 |
7897 // Get the integer part of a heap number. Surprisingly, all this bit twiddling | 7896 // Get the integer part of a heap number. |
7898 // is faster than using the built-in instructions on floating point registers. | |
7899 // Trashes rdi and rbx. Dest is rcx. Source cannot be rcx or one of the | 7897 // Trashes rdi and rbx. Dest is rcx. Source cannot be rcx or one of the |
7900 // trashed registers. | 7898 // trashed registers. |
7901 void IntegerConvert(MacroAssembler* masm, | 7899 void IntegerConvert(MacroAssembler* masm, |
7902 Register source, | 7900 Register source, |
7903 bool use_sse3, | |
7904 Label* conversion_failure) { | 7901 Label* conversion_failure) { |
7905 ASSERT(!source.is(rcx) && !source.is(rdi) && !source.is(rbx)); | 7902 ASSERT(!source.is(rcx) && !source.is(rdi) && !source.is(rbx)); |
7906 Label done, right_exponent, normal_exponent; | |
7907 Register scratch = rbx; | 7903 Register scratch = rbx; |
7908 Register scratch2 = rdi; | 7904 Register scratch2 = rdi; |
7909 // Get exponent word. | 7905 // Get exponent word. |
7910 __ movl(scratch, FieldOperand(source, HeapNumber::kExponentOffset)); | 7906 __ movq(scratch2, FieldOperand(source, HeapNumber::kValueOffset)); |
7911 // Get exponent alone in scratch2. | 7907 // Get exponent alone in scratch2. |
7912 __ movl(scratch2, scratch); | 7908 __ movq(xmm0, scratch2); |
7913 __ and_(scratch2, Immediate(HeapNumber::kExponentMask)); | 7909 __ shr(scratch2, Immediate(HeapNumber::kMantissaBits)); |
7914 if (use_sse3) { | 7910 __ andl(scratch2, Immediate((1 << HeapNumber::kExponentBits) - 1)); |
7915 CpuFeatures::Scope scope(SSE3); | 7911 // Check whether the exponent is too big for a 63 bit unsigned integer. |
7916 // Check whether the exponent is too big for a 64 bit signed integer. | 7912 // (Notice: Doesn't handle MIN_SMI). |
7917 static const uint32_t kTooBigExponent = | 7913 __ cmpl(scratch2, Immediate(63 + HeapNumber::kExponentBias)); |
7918 (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift; | 7914 __ j(greater_equal, conversion_failure); |
7919 __ cmpl(scratch2, Immediate(kTooBigExponent)); | 7915 // Handle exponent range -inf..62. |
7920 __ j(greater_equal, conversion_failure); | 7916 __ cvttsd2siq(rcx, xmm0); |
7921 // Load x87 register with heap number. | 7917 // TODO(lrn): Do bit-fiddling for exponents in range 63..84 and return |
7922 __ fld_d(FieldOperand(source, HeapNumber::kValueOffset)); | 7918 // zero for everything else (also including negative exponents). |
7923 // Reserve space for 64 bit answer. | |
7924 __ subq(rsp, Immediate(sizeof(uint64_t))); // Nolint. | |
7925 // Do conversion, which cannot fail because we checked the exponent. | |
7926 __ fisttp_d(Operand(rsp, 0)); | |
7927 __ movl(rcx, Operand(rsp, 0)); // Load low word of answer into rcx. | |
7928 __ addq(rsp, Immediate(sizeof(uint64_t))); // Nolint. | |
7929 } else { | |
7930 // Load rcx with zero. We use this either for the final shift or | |
7931 // for the answer. | |
7932 __ xor_(rcx, rcx); | |
7933 // Check whether the exponent matches a 32 bit signed int that cannot be | |
7934 // represented by a Smi. A non-smi 32 bit integer is 1.xxx * 2^30 so the | |
7935 // exponent is 30 (biased). This is the exponent that we are fastest at and | |
7936 // also the highest exponent we can handle here. | |
7937 const uint32_t non_smi_exponent = | |
7938 (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift; | |
7939 __ cmpl(scratch2, Immediate(non_smi_exponent)); | |
7940 // If we have a match of the int32-but-not-Smi exponent then skip some | |
7941 // logic. | |
7942 __ j(equal, &right_exponent); | |
7943 // If the exponent is higher than that then go to slow case. This catches | |
7944 // numbers that don't fit in a signed int32, infinities and NaNs. | |
7945 __ j(less, &normal_exponent); | |
7946 | |
7947 { | |
7948 // Handle a big exponent. The only reason we have this code is that the | |
7949 // >>> operator has a tendency to generate numbers with an exponent of 31. | |
7950 const uint32_t big_non_smi_exponent = | |
7951 (HeapNumber::kExponentBias + 31) << HeapNumber::kExponentShift; | |
7952 __ cmpl(scratch2, Immediate(big_non_smi_exponent)); | |
7953 __ j(not_equal, conversion_failure); | |
7954 // We have the big exponent, typically from >>>. This means the number is | |
7955 // in the range 2^31 to 2^32 - 1. Get the top bits of the mantissa. | |
7956 __ movl(scratch2, scratch); | |
7957 __ and_(scratch2, Immediate(HeapNumber::kMantissaMask)); | |
7958 // Put back the implicit 1. | |
7959 __ or_(scratch2, Immediate(1 << HeapNumber::kExponentShift)); | |
7960 // Shift up the mantissa bits to take up the space the exponent used to | |
7961 // take. We just orred in the implicit bit so that took care of one and | |
7962 // we want to use the full unsigned range so we subtract 1 bit from the | |
7963 // shift distance. | |
7964 const int big_shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 1; | |
7965 __ shl(scratch2, Immediate(big_shift_distance)); | |
7966 // Get the second half of the double. | |
7967 __ movl(rcx, FieldOperand(source, HeapNumber::kMantissaOffset)); | |
7968 // Shift down 21 bits to get the most significant 11 bits or the low | |
7969 // mantissa word. | |
7970 __ shr(rcx, Immediate(32 - big_shift_distance)); | |
7971 __ or_(rcx, scratch2); | |
7972 // We have the answer in rcx, but we may need to negate it. | |
7973 __ testl(scratch, scratch); | |
7974 __ j(positive, &done); | |
7975 __ neg(rcx); | |
7976 __ jmp(&done); | |
7977 } | |
7978 | |
7979 __ bind(&normal_exponent); | |
7980 // Exponent word in scratch, exponent part of exponent word in scratch2. | |
7981 // Zero in rcx. | |
7982 // We know the exponent is smaller than 30 (biased). If it is less than | |
7983 // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, ie | |
7984 // it rounds to zero. | |
7985 const uint32_t zero_exponent = | |
7986 (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift; | |
7987 __ subl(scratch2, Immediate(zero_exponent)); | |
7988 // rcx already has a Smi zero. | |
7989 __ j(less, &done); | |
7990 | |
7991 // We have a shifted exponent between 0 and 30 in scratch2. | |
7992 __ shr(scratch2, Immediate(HeapNumber::kExponentShift)); | |
7993 __ movl(rcx, Immediate(30)); | |
7994 __ subl(rcx, scratch2); | |
7995 | |
7996 __ bind(&right_exponent); | |
7997 // Here rcx is the shift, scratch is the exponent word. | |
7998 // Get the top bits of the mantissa. | |
7999 __ and_(scratch, Immediate(HeapNumber::kMantissaMask)); | |
8000 // Put back the implicit 1. | |
8001 __ or_(scratch, Immediate(1 << HeapNumber::kExponentShift)); | |
8002 // Shift up the mantissa bits to take up the space the exponent used to | |
8003 // take. We have kExponentShift + 1 significant bits int he low end of the | |
8004 // word. Shift them to the top bits. | |
8005 const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2; | |
8006 __ shl(scratch, Immediate(shift_distance)); | |
8007 // Get the second half of the double. For some exponents we don't | |
8008 // actually need this because the bits get shifted out again, but | |
8009 // it's probably slower to test than just to do it. | |
8010 __ movl(scratch2, FieldOperand(source, HeapNumber::kMantissaOffset)); | |
8011 // Shift down 22 bits to get the most significant 10 bits or the low | |
8012 // mantissa word. | |
8013 __ shr(scratch2, Immediate(32 - shift_distance)); | |
8014 __ or_(scratch2, scratch); | |
8015 // Move down according to the exponent. | |
8016 __ shr_cl(scratch2); | |
8017 // Now the unsigned answer is in scratch2. We need to move it to rcx and | |
8018 // we may need to fix the sign. | |
8019 Label negative; | |
8020 __ xor_(rcx, rcx); | |
8021 __ cmpl(rcx, FieldOperand(source, HeapNumber::kExponentOffset)); | |
8022 __ j(greater, &negative); | |
8023 __ movl(rcx, scratch2); | |
8024 __ jmp(&done); | |
8025 __ bind(&negative); | |
8026 __ subl(rcx, scratch2); | |
8027 __ bind(&done); | |
8028 } | |
8029 } | 7919 } |
8030 | 7920 |
8031 | 7921 |
8032 void GenericUnaryOpStub::Generate(MacroAssembler* masm) { | 7922 void GenericUnaryOpStub::Generate(MacroAssembler* masm) { |
8033 Label slow, done; | 7923 Label slow, done; |
8034 | 7924 |
8035 if (op_ == Token::SUB) { | 7925 if (op_ == Token::SUB) { |
8036 // Check whether the value is a smi. | 7926 // Check whether the value is a smi. |
8037 Label try_float; | 7927 Label try_float; |
8038 __ JumpIfNotSmi(rax, &try_float); | 7928 __ JumpIfNotSmi(rax, &try_float); |
(...skipping 29 matching lines...) Expand all Loading... |
8068 __ movq(FieldOperand(rcx, HeapNumber::kValueOffset), rdx); | 7958 __ movq(FieldOperand(rcx, HeapNumber::kValueOffset), rdx); |
8069 __ movq(rax, rcx); | 7959 __ movq(rax, rcx); |
8070 } | 7960 } |
8071 } else if (op_ == Token::BIT_NOT) { | 7961 } else if (op_ == Token::BIT_NOT) { |
8072 // Check if the operand is a heap number. | 7962 // Check if the operand is a heap number. |
8073 __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset)); | 7963 __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset)); |
8074 __ CompareRoot(rdx, Heap::kHeapNumberMapRootIndex); | 7964 __ CompareRoot(rdx, Heap::kHeapNumberMapRootIndex); |
8075 __ j(not_equal, &slow); | 7965 __ j(not_equal, &slow); |
8076 | 7966 |
8077 // Convert the heap number in rax to an untagged integer in rcx. | 7967 // Convert the heap number in rax to an untagged integer in rcx. |
8078 IntegerConvert(masm, rax, CpuFeatures::IsSupported(SSE3), &slow); | 7968 IntegerConvert(masm, rax, &slow); |
8079 | 7969 |
8080 // Do the bitwise operation and check if the result fits in a smi. | 7970 // Do the bitwise operation and check if the result fits in a smi. |
8081 Label try_float; | 7971 Label try_float; |
8082 __ not_(rcx); | 7972 __ not_(rcx); |
8083 // Tag the result as a smi and we're done. | 7973 // Tag the result as a smi and we're done. |
8084 ASSERT(kSmiTagSize == 1); | 7974 ASSERT(kSmiTagSize == 1); |
8085 __ Integer32ToSmi(rax, rcx); | 7975 __ Integer32ToSmi(rax, rcx); |
8086 } | 7976 } |
8087 | 7977 |
8088 // Return from the stub. | 7978 // Return from the stub. |
(...skipping 1560 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
9649 __ SmiToInteger32(kScratchRegister, rdx); | 9539 __ SmiToInteger32(kScratchRegister, rdx); |
9650 __ cvtlsi2sd(dst1, kScratchRegister); | 9540 __ cvtlsi2sd(dst1, kScratchRegister); |
9651 __ SmiToInteger32(kScratchRegister, rax); | 9541 __ SmiToInteger32(kScratchRegister, rax); |
9652 __ cvtlsi2sd(dst2, kScratchRegister); | 9542 __ cvtlsi2sd(dst2, kScratchRegister); |
9653 } | 9543 } |
9654 | 9544 |
9655 | 9545 |
9656 // Input: rdx, rax are the left and right objects of a bit op. | 9546 // Input: rdx, rax are the left and right objects of a bit op. |
9657 // Output: rax, rcx are left and right integers for a bit op. | 9547 // Output: rax, rcx are left and right integers for a bit op. |
9658 void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm, | 9548 void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm, |
9659 bool use_sse3, | |
9660 Label* conversion_failure) { | 9549 Label* conversion_failure) { |
9661 // Check float operands. | 9550 // Check float operands. |
9662 Label arg1_is_object, check_undefined_arg1; | 9551 Label arg1_is_object, check_undefined_arg1; |
9663 Label arg2_is_object, check_undefined_arg2; | 9552 Label arg2_is_object, check_undefined_arg2; |
9664 Label load_arg2, done; | 9553 Label load_arg2, done; |
9665 | 9554 |
9666 __ JumpIfNotSmi(rdx, &arg1_is_object); | 9555 __ JumpIfNotSmi(rdx, &arg1_is_object); |
9667 __ SmiToInteger32(rdx, rdx); | 9556 __ SmiToInteger32(rdx, rdx); |
9668 __ jmp(&load_arg2); | 9557 __ jmp(&load_arg2); |
9669 | 9558 |
9670 // If the argument is undefined it converts to zero (ECMA-262, section 9.5). | 9559 // If the argument is undefined it converts to zero (ECMA-262, section 9.5). |
9671 __ bind(&check_undefined_arg1); | 9560 __ bind(&check_undefined_arg1); |
9672 __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex); | 9561 __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex); |
9673 __ j(not_equal, conversion_failure); | 9562 __ j(not_equal, conversion_failure); |
9674 __ movl(rdx, Immediate(0)); | 9563 __ movl(rdx, Immediate(0)); |
9675 __ jmp(&load_arg2); | 9564 __ jmp(&load_arg2); |
9676 | 9565 |
9677 __ bind(&arg1_is_object); | 9566 __ bind(&arg1_is_object); |
9678 __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset)); | 9567 __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset)); |
9679 __ CompareRoot(rbx, Heap::kHeapNumberMapRootIndex); | 9568 __ CompareRoot(rbx, Heap::kHeapNumberMapRootIndex); |
9680 __ j(not_equal, &check_undefined_arg1); | 9569 __ j(not_equal, &check_undefined_arg1); |
9681 // Get the untagged integer version of the edx heap number in rcx. | 9570 // Get the untagged integer version of the rdx heap number in rcx. |
9682 IntegerConvert(masm, rdx, use_sse3, conversion_failure); | 9571 IntegerConvert(masm, rdx, conversion_failure); |
9683 __ movl(rdx, rcx); | 9572 __ movl(rdx, rcx); |
9684 | 9573 |
9685 // Here edx has the untagged integer, eax has a Smi or a heap number. | 9574 // Here rdx has the untagged integer, rax has a Smi or a heap number. |
9686 __ bind(&load_arg2); | 9575 __ bind(&load_arg2); |
9687 // Test if arg2 is a Smi. | 9576 // Test if arg2 is a Smi. |
9688 __ JumpIfNotSmi(rax, &arg2_is_object); | 9577 __ JumpIfNotSmi(rax, &arg2_is_object); |
9689 __ SmiToInteger32(rax, rax); | 9578 __ SmiToInteger32(rax, rax); |
9690 __ movl(rcx, rax); | 9579 __ movl(rcx, rax); |
9691 __ jmp(&done); | 9580 __ jmp(&done); |
9692 | 9581 |
9693 // If the argument is undefined it converts to zero (ECMA-262, section 9.5). | 9582 // If the argument is undefined it converts to zero (ECMA-262, section 9.5). |
9694 __ bind(&check_undefined_arg2); | 9583 __ bind(&check_undefined_arg2); |
9695 __ CompareRoot(rax, Heap::kUndefinedValueRootIndex); | 9584 __ CompareRoot(rax, Heap::kUndefinedValueRootIndex); |
9696 __ j(not_equal, conversion_failure); | 9585 __ j(not_equal, conversion_failure); |
9697 __ movl(rcx, Immediate(0)); | 9586 __ movl(rcx, Immediate(0)); |
9698 __ jmp(&done); | 9587 __ jmp(&done); |
9699 | 9588 |
9700 __ bind(&arg2_is_object); | 9589 __ bind(&arg2_is_object); |
9701 __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset)); | 9590 __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset)); |
9702 __ CompareRoot(rbx, Heap::kHeapNumberMapRootIndex); | 9591 __ CompareRoot(rbx, Heap::kHeapNumberMapRootIndex); |
9703 __ j(not_equal, &check_undefined_arg2); | 9592 __ j(not_equal, &check_undefined_arg2); |
9704 // Get the untagged integer version of the eax heap number in ecx. | 9593 // Get the untagged integer version of the rax heap number in rcx. |
9705 IntegerConvert(masm, rax, use_sse3, conversion_failure); | 9594 IntegerConvert(masm, rax, conversion_failure); |
9706 __ bind(&done); | 9595 __ bind(&done); |
9707 __ movl(rax, rdx); | 9596 __ movl(rax, rdx); |
9708 } | 9597 } |
9709 | 9598 |
9710 | 9599 |
9711 void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm, | 9600 void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm, |
9712 Register lhs, | 9601 Register lhs, |
9713 Register rhs) { | 9602 Register rhs) { |
9714 Label load_smi_lhs, load_smi_rhs, done_load_lhs, done; | 9603 Label load_smi_lhs, load_smi_rhs, done_load_lhs, done; |
9715 __ JumpIfSmi(lhs, &load_smi_lhs); | 9604 __ JumpIfSmi(lhs, &load_smi_lhs); |
(...skipping 54 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
9770 default: overwrite_name = "UnknownOverwrite"; break; | 9659 default: overwrite_name = "UnknownOverwrite"; break; |
9771 } | 9660 } |
9772 | 9661 |
9773 OS::SNPrintF(Vector<char>(name_, len), | 9662 OS::SNPrintF(Vector<char>(name_, len), |
9774 "GenericBinaryOpStub_%s_%s%s_%s%s_%s%s_%s", | 9663 "GenericBinaryOpStub_%s_%s%s_%s%s_%s%s_%s", |
9775 op_name, | 9664 op_name, |
9776 overwrite_name, | 9665 overwrite_name, |
9777 (flags_ & NO_SMI_CODE_IN_STUB) ? "_NoSmiInStub" : "", | 9666 (flags_ & NO_SMI_CODE_IN_STUB) ? "_NoSmiInStub" : "", |
9778 args_in_registers_ ? "RegArgs" : "StackArgs", | 9667 args_in_registers_ ? "RegArgs" : "StackArgs", |
9779 args_reversed_ ? "_R" : "", | 9668 args_reversed_ ? "_R" : "", |
9780 use_sse3_ ? "SSE3" : "SSE2", | |
9781 static_operands_type_.ToString(), | 9669 static_operands_type_.ToString(), |
9782 BinaryOpIC::GetName(runtime_operands_type_)); | 9670 BinaryOpIC::GetName(runtime_operands_type_)); |
9783 return name_; | 9671 return name_; |
9784 } | 9672 } |
9785 | 9673 |
9786 | 9674 |
9787 void GenericBinaryOpStub::GenerateCall( | 9675 void GenericBinaryOpStub::GenerateCall( |
9788 MacroAssembler* masm, | 9676 MacroAssembler* masm, |
9789 Register left, | 9677 Register left, |
9790 Register right) { | 9678 Register right) { |
(...skipping 412 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
10203 // For MOD we go directly to runtime in the non-smi case. | 10091 // For MOD we go directly to runtime in the non-smi case. |
10204 break; | 10092 break; |
10205 } | 10093 } |
10206 case Token::BIT_OR: | 10094 case Token::BIT_OR: |
10207 case Token::BIT_AND: | 10095 case Token::BIT_AND: |
10208 case Token::BIT_XOR: | 10096 case Token::BIT_XOR: |
10209 case Token::SAR: | 10097 case Token::SAR: |
10210 case Token::SHL: | 10098 case Token::SHL: |
10211 case Token::SHR: { | 10099 case Token::SHR: { |
10212 Label skip_allocation, non_smi_result; | 10100 Label skip_allocation, non_smi_result; |
10213 FloatingPointHelper::LoadAsIntegers(masm, use_sse3_, &call_runtime); | 10101 FloatingPointHelper::LoadAsIntegers(masm, &call_runtime); |
10214 switch (op_) { | 10102 switch (op_) { |
10215 case Token::BIT_OR: __ orl(rax, rcx); break; | 10103 case Token::BIT_OR: __ orl(rax, rcx); break; |
10216 case Token::BIT_AND: __ andl(rax, rcx); break; | 10104 case Token::BIT_AND: __ andl(rax, rcx); break; |
10217 case Token::BIT_XOR: __ xorl(rax, rcx); break; | 10105 case Token::BIT_XOR: __ xorl(rax, rcx); break; |
10218 case Token::SAR: __ sarl_cl(rax); break; | 10106 case Token::SAR: __ sarl_cl(rax); break; |
10219 case Token::SHL: __ shll_cl(rax); break; | 10107 case Token::SHL: __ shll_cl(rax); break; |
10220 case Token::SHR: __ shrl_cl(rax); break; | 10108 case Token::SHR: __ shrl_cl(rax); break; |
10221 default: UNREACHABLE(); | 10109 default: UNREACHABLE(); |
10222 } | 10110 } |
10223 if (op_ == Token::SHR) { | 10111 if (op_ == Token::SHR) { |
10224 // Check if result is non-negative. This can only happen for a shift | 10112 // Check if result is negative. This can only happen for a shift |
10225 // by zero, which also doesn't update the sign flag. | 10113 // by zero, which also doesn't update the sign flag. |
10226 __ testl(rax, rax); | 10114 __ testl(rax, rax); |
10227 __ j(negative, &non_smi_result); | 10115 __ j(negative, &non_smi_result); |
10228 } | 10116 } |
10229 __ JumpIfNotValidSmiValue(rax, &non_smi_result); | 10117 __ JumpIfNotValidSmiValue(rax, &non_smi_result); |
10230 // Tag smi result, if possible, and return. | 10118 // Tag smi result, if possible, and return. |
10231 __ Integer32ToSmi(rax, rax); | 10119 __ Integer32ToSmi(rax, rax); |
10232 GenerateReturn(masm); | 10120 GenerateReturn(masm); |
10233 | 10121 |
10234 // All ops except SHR return a signed int32 that we load in | 10122 // All ops except SHR return a signed int32 that we load in |
(...skipping 1285 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
11520 // Call the function from C++. | 11408 // Call the function from C++. |
11521 return FUNCTION_CAST<ModuloFunction>(buffer); | 11409 return FUNCTION_CAST<ModuloFunction>(buffer); |
11522 } | 11410 } |
11523 | 11411 |
11524 #endif | 11412 #endif |
11525 | 11413 |
11526 | 11414 |
11527 #undef __ | 11415 #undef __ |
11528 | 11416 |
11529 } } // namespace v8::internal | 11417 } } // namespace v8::internal |
OLD | NEW |