OLD | NEW |
1 // Copyright 2006-2009 the V8 project authors. All rights reserved. | 1 // Copyright 2006-2009 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 746 matching lines...)
757 static void LoadFloatOperands(MacroAssembler* masm, Register scratch); | 757 static void LoadFloatOperands(MacroAssembler* masm, Register scratch); |
758 // Test if operands are smi or number objects (fp). Requirements: | 758 // Test if operands are smi or number objects (fp). Requirements: |
759 // operand_1 in eax, operand_2 in edx; falls through on float | 759 // operand_1 in eax, operand_2 in edx; falls through on float |
760 // operands, jumps to the non_float label otherwise. | 760 // operands, jumps to the non_float label otherwise. |
761 static void CheckFloatOperands(MacroAssembler* masm, | 761 static void CheckFloatOperands(MacroAssembler* masm, |
762 Label* non_float, | 762 Label* non_float, |
763 Register scratch); | 763 Register scratch); |
764 // Takes the operands in edx and eax and loads them as integers in eax | 764 // Takes the operands in edx and eax and loads them as integers in eax |
765 // and ecx. | 765 // and ecx. |
766 static void LoadAsIntegers(MacroAssembler* masm, | 766 static void LoadAsIntegers(MacroAssembler* masm, |
| 767 bool use_sse3, |
767 Label* operand_conversion_failure); | 768 Label* operand_conversion_failure); |
768 // Test if operands are numbers (smi or HeapNumber objects), and load | 769 // Test if operands are numbers (smi or HeapNumber objects), and load |
769 // them into xmm0 and xmm1 if they are. Jump to label not_numbers if | 770 // them into xmm0 and xmm1 if they are. Jump to label not_numbers if |
770 // either operand is not a number. Operands are in edx and eax. | 771 // either operand is not a number. Operands are in edx and eax. |
771 // Leaves operands unchanged. | 772 // Leaves operands unchanged. |
772 static void LoadSse2Operands(MacroAssembler* masm, Label* not_numbers); | 773 static void LoadSse2Operands(MacroAssembler* masm, Label* not_numbers); |
773 }; | 774 }; |
774 | 775 |
775 | 776 |
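Editor's note: the LoadAsIntegers helper declared above (now taking the new use_sse3 flag) exists to satisfy the ECMAScript ToInt32 contract for the bitwise and shift operators: truncate toward zero, wrap modulo 2^32, and map NaN and the infinities to 0. The following is a minimal reference model in plain C++, not V8 code; ReferenceToInt32 is a name invented here purely to make the expected results concrete.

#include <cassert>
#include <cmath>
#include <cstdint>

// Reference ECMAScript ToInt32 in double arithmetic. Illustrative only;
// this models the contract LoadAsIntegers has to meet, it is not V8 code.
static int32_t ReferenceToInt32(double x) {
  if (!std::isfinite(x)) return 0;            // NaN and +/-Infinity map to 0
  double d = std::trunc(x);                   // truncate toward zero
  d = std::fmod(d, 4294967296.0);             // wrap modulo 2^32 ...
  if (d < 0) d += 4294967296.0;               // ... into [0, 2^32)
  return static_cast<int32_t>(static_cast<uint32_t>(d));
}

int main() {
  assert(ReferenceToInt32(2.9) == 2);                   // 2.9 | 0 === 2
  assert(ReferenceToInt32(-2.9) == -2);                 // -2.9 | 0 === -2
  assert(ReferenceToInt32(2147483648.0) == INT32_MIN);  // 2^31 wraps negative
  assert(ReferenceToInt32(4294967295.0) == -1);         // 2^32 - 1 wraps to -1
  return 0;
}

The 2^31 and 2^32 - 1 cases are the ones the >>> operator tends to produce, which is why IntegerConvert further down has a dedicated big-exponent branch.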
776 const char* GenericBinaryOpStub::GetName() { | 777 const char* GenericBinaryOpStub::GetName() { |
(...skipping 6458 matching lines...)
7235 case Token::BIT_OR: | 7236 case Token::BIT_OR: |
7236 case Token::BIT_AND: | 7237 case Token::BIT_AND: |
7237 case Token::BIT_XOR: | 7238 case Token::BIT_XOR: |
7238 case Token::SAR: | 7239 case Token::SAR: |
7239 case Token::SHL: | 7240 case Token::SHL: |
7240 case Token::SHR: { | 7241 case Token::SHR: { |
7241 Label non_smi_result, skip_allocation; | 7242 Label non_smi_result, skip_allocation; |
7242 Label operand_conversion_failure; | 7243 Label operand_conversion_failure; |
7243 FloatingPointHelper::LoadAsIntegers( | 7244 FloatingPointHelper::LoadAsIntegers( |
7244 masm, | 7245 masm, |
| 7246 use_sse3_, |
7245 &operand_conversion_failure); | 7247 &operand_conversion_failure); |
7246 switch (op_) { | 7248 switch (op_) { |
7247 case Token::BIT_OR: __ or_(eax, Operand(ecx)); break; | 7249 case Token::BIT_OR: __ or_(eax, Operand(ecx)); break; |
7248 case Token::BIT_AND: __ and_(eax, Operand(ecx)); break; | 7250 case Token::BIT_AND: __ and_(eax, Operand(ecx)); break; |
7249 case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break; | 7251 case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break; |
7250 case Token::SAR: __ sar_cl(eax); break; | 7252 case Token::SAR: __ sar_cl(eax); break; |
7251 case Token::SHL: __ shl_cl(eax); break; | 7253 case Token::SHL: __ shl_cl(eax); break; |
7252 case Token::SHR: __ shr_cl(eax); break; | 7254 case Token::SHR: __ shr_cl(eax); break; |
7253 default: UNREACHABLE(); | 7255 default: UNREACHABLE(); |
7254 } | 7256 } |
(...skipping 163 matching lines...)
7418 } | 7420 } |
7419 } | 7421 } |
7420 | 7422 |
7421 | 7423 |
7422 // Get the integer part of a heap number. Surprisingly, all this bit twiddling | 7424 // Get the integer part of a heap number. Surprisingly, all this bit twiddling |
7423 // is faster than using the built-in instructions on floating point registers. | 7425 // is faster than using the built-in instructions on floating point registers. |
7424 // Trashes edi and ebx. Dest is ecx. Source cannot be ecx or one of the | 7426 // Trashes edi and ebx. Dest is ecx. Source cannot be ecx or one of the |
7425 // trashed registers. | 7427 // trashed registers. |
7426 void IntegerConvert(MacroAssembler* masm, | 7428 void IntegerConvert(MacroAssembler* masm, |
7427 Register source, | 7429 Register source, |
| 7430 bool use_sse3, |
7428 Label* conversion_failure) { | 7431 Label* conversion_failure) { |
7429 Label done, right_exponent, normal_exponent; | 7432 Label done, right_exponent, normal_exponent; |
7430 Register scratch = ebx; | 7433 Register scratch = ebx; |
7431 Register scratch2 = edi; | 7434 Register scratch2 = edi; |
7432 // Get exponent word. | 7435 // Get exponent word. |
7433 __ mov(scratch, FieldOperand(source, HeapNumber::kExponentOffset)); | 7436 __ mov(scratch, FieldOperand(source, HeapNumber::kExponentOffset)); |
7434 // Get exponent alone in scratch2. | 7437 // Get exponent alone in scratch2. |
7435 __ mov(scratch2, scratch); | 7438 __ mov(scratch2, scratch); |
7436 __ and_(scratch2, HeapNumber::kExponentMask); | 7439 __ and_(scratch2, HeapNumber::kExponentMask); |
7437 // Load ecx with zero. We use this either for the final shift or | 7440 if (use_sse3) { |
7438 // for the answer. | 7441 CpuFeatures::Scope scope(SSE3); |
7439 __ xor_(ecx, Operand(ecx)); | 7442 // Check whether the exponent is too big for a 64 bit signed integer. |
7440 // Check whether the exponent matches a 32 bit signed int that cannot be | 7443 const uint32_t too_big_exponent = |
7441 // represented by a Smi. A non-smi 32 bit integer is 1.xxx * 2^30 so the | 7444 (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift; |
7442 // exponent is 30 (biased). This is the exponent that we are fastest at and | 7445 __ cmp(Operand(scratch2), Immediate(too_big_exponent)); |
7443 // also the highest exponent we can handle here. | 7446 __ j(greater_equal, conversion_failure); |
7444 const uint32_t non_smi_exponent = | 7447 // Load x87 register with heap number. |
7445 (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift; | 7448 __ fld_d(FieldOperand(source, HeapNumber::kValueOffset)); |
7446 __ cmp(Operand(scratch2), Immediate(non_smi_exponent)); | 7449 // Reserve space for 64 bit answer. |
7447 // If we have a match of the int32-but-not-Smi exponent then skip some logic. | 7450 __ sub(Operand(esp), Immediate(sizeof(uint64_t))); // Nolint. |
7448 __ j(equal, &right_exponent); | 7451 // Do conversion, which cannot fail because we checked the exponent. |
7449 // If the exponent is higher than that then go to slow case. This catches | 7452 __ fisttp_d(Operand(esp, 0)); |
7450 // numbers that don't fit in a signed int32, infinities and NaNs. | 7453 __ mov(ecx, Operand(esp, 0)); // Load low word of answer into ecx. |
7451 __ j(less, &normal_exponent); | 7454 __ add(Operand(esp), Immediate(sizeof(uint64_t))); // Nolint. |
| 7455 } else { |
| 7456 // Load ecx with zero. We use this either for the final shift or |
| 7457 // for the answer. |
| 7458 __ xor_(ecx, Operand(ecx)); |
| 7459 // Check whether the exponent matches a 32 bit signed int that cannot be |
| 7460 // represented by a Smi. A non-smi 32 bit integer is 1.xxx * 2^30 so the |
| 7461 // exponent is 30 (biased). This is the exponent that we are fastest at and |
| 7462 // also the highest exponent we can handle here. |
| 7463 const uint32_t non_smi_exponent = |
| 7464 (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift; |
| 7465 __ cmp(Operand(scratch2), Immediate(non_smi_exponent)); |
 | 7466 // If we have a match of the int32-but-not-Smi exponent then skip some logic. |
| 7467 __ j(equal, &right_exponent); |
| 7468 // If the exponent is higher than that then go to slow case. This catches |
| 7469 // numbers that don't fit in a signed int32, infinities and NaNs. |
| 7470 __ j(less, &normal_exponent); |
7452 | 7471 |
7453 { | 7472 { |
7454 // Handle a big exponent. The only reason we have this code is that the >>> | 7473 // Handle a big exponent. The only reason we have this code is that the >>> |
7455 // operator has a tendency to generate numbers with an exponent of 31. | 7474 // operator has a tendency to generate numbers with an exponent of 31. |
7456 const uint32_t big_non_smi_exponent = | 7475 const uint32_t big_non_smi_exponent = |
7457 (HeapNumber::kExponentBias + 31) << HeapNumber::kExponentShift; | 7476 (HeapNumber::kExponentBias + 31) << HeapNumber::kExponentShift; |
7458 __ cmp(Operand(scratch2), Immediate(big_non_smi_exponent)); | 7477 __ cmp(Operand(scratch2), Immediate(big_non_smi_exponent)); |
7459 __ j(not_equal, conversion_failure); | 7478 __ j(not_equal, conversion_failure); |
7460 // We have the big exponent, typically from >>>. This means the number is | 7479 // We have the big exponent, typically from >>>. This means the number is |
7461 // in the range 2^31 to 2^32 - 1. Get the top bits of the mantissa. | 7480 // in the range 2^31 to 2^32 - 1. Get the top bits of the mantissa. |
7462 __ mov(scratch2, scratch); | 7481 __ mov(scratch2, scratch); |
7463 __ and_(scratch2, HeapNumber::kMantissaMask); | 7482 __ and_(scratch2, HeapNumber::kMantissaMask); |
| 7483 // Put back the implicit 1. |
| 7484 __ or_(scratch2, 1 << HeapNumber::kExponentShift); |
| 7485 // Shift up the mantissa bits to take up the space the exponent used to |
| 7486 // take. We just orred in the implicit bit so that took care of one and |
| 7487 // we want to use the full unsigned range so we subtract 1 bit from the |
| 7488 // shift distance. |
| 7489 const int big_shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 1; |
| 7490 __ shl(scratch2, big_shift_distance); |
| 7491 // Get the second half of the double. |
| 7492 __ mov(ecx, FieldOperand(source, HeapNumber::kMantissaOffset)); |
| 7493 // Shift down 21 bits to get the most significant 11 bits or the low |
| 7494 // mantissa word. |
| 7495 __ shr(ecx, 32 - big_shift_distance); |
| 7496 __ or_(ecx, Operand(scratch2)); |
| 7497 // We have the answer in ecx, but we may need to negate it. |
| 7498 __ test(scratch, Operand(scratch)); |
| 7499 __ j(positive, &done); |
| 7500 __ neg(ecx); |
| 7501 __ jmp(&done); |
| 7502 } |
| 7503 |
| 7504 __ bind(&normal_exponent); |
| 7505 // Exponent word in scratch, exponent part of exponent word in scratch2. |
| 7506 // Zero in ecx. |
| 7507 // We know the exponent is smaller than 30 (biased). If it is less than |
| 7508 // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, ie |
| 7509 // it rounds to zero. |
| 7510 const uint32_t zero_exponent = |
| 7511 (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift; |
| 7512 __ sub(Operand(scratch2), Immediate(zero_exponent)); |
| 7513 // ecx already has a Smi zero. |
| 7514 __ j(less, &done); |
| 7515 |
| 7516 // We have a shifted exponent between 0 and 30 in scratch2. |
| 7517 __ shr(scratch2, HeapNumber::kExponentShift); |
| 7518 __ mov(ecx, Immediate(30)); |
| 7519 __ sub(ecx, Operand(scratch2)); |
| 7520 |
| 7521 __ bind(&right_exponent); |
| 7522 // Here ecx is the shift, scratch is the exponent word. |
| 7523 // Get the top bits of the mantissa. |
| 7524 __ and_(scratch, HeapNumber::kMantissaMask); |
7464 // Put back the implicit 1. | 7525 // Put back the implicit 1. |
7465 __ or_(scratch2, 1 << HeapNumber::kExponentShift); | 7526 __ or_(scratch, 1 << HeapNumber::kExponentShift); |
7466 // Shift up the mantissa bits to take up the space the exponent used to | 7527 // Shift up the mantissa bits to take up the space the exponent used to |
7467 // take. We just orred in the implicit bit so that took care of one and | 7528 // take. We have kExponentShift + 1 significant bits in the low end of the |
7468 // we want to use the full unsigned range so we subtract 1 bit from the | 7529 // word. Shift them to the top bits. |
7469 // shift distance. | 7530 const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2; |
7470 const int big_shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 1; | 7531 __ shl(scratch, shift_distance); |
7471 __ shl(scratch2, big_shift_distance); | 7532 // Get the second half of the double. For some exponents we don't |
7472 // Get the second half of the double. | 7533 // actually need this because the bits get shifted out again, but |
7473 __ mov(ecx, FieldOperand(source, HeapNumber::kMantissaOffset)); | 7534 // it's probably slower to test than just to do it. |
7474 // Shift down 21 bits to get the most significant 11 bits or the low | 7535 __ mov(scratch2, FieldOperand(source, HeapNumber::kMantissaOffset)); |
7475 // mantissa word. | 7536 // Shift down 22 bits to get the most significant 10 bits or the low mantissa |
7476 __ shr(ecx, 32 - big_shift_distance); | 7537 // word. |
7477 __ or_(ecx, Operand(scratch2)); | 7538 __ shr(scratch2, 32 - shift_distance); |
7478 // We have the answer in ecx, but we may need to negate it. | 7539 __ or_(scratch2, Operand(scratch)); |
7479 __ test(scratch, Operand(scratch)); | 7540 // Move down according to the exponent. |
7480 __ j(positive, &done); | 7541 __ shr_cl(scratch2); |
7481 __ neg(ecx); | 7542 // Now the unsigned answer is in scratch2. We need to move it to ecx and |
| 7543 // we may need to fix the sign. |
| 7544 Label negative; |
| 7545 __ xor_(ecx, Operand(ecx)); |
| 7546 __ cmp(ecx, FieldOperand(source, HeapNumber::kExponentOffset)); |
| 7547 __ j(greater, &negative); |
| 7548 __ mov(ecx, scratch2); |
7482 __ jmp(&done); | 7549 __ jmp(&done); |
| 7550 __ bind(&negative); |
| 7551 __ sub(ecx, Operand(scratch2)); |
| 7552 __ bind(&done); |
7483 } | 7553 } |
7484 | |
7485 __ bind(&normal_exponent); | |
7486 // Exponent word in scratch, exponent part of exponent word in scratch2. | |
7487 // Zero in ecx. | |
7488 // We know the exponent is smaller than 30 (biased). If it is less than | |
7489 // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, ie | |
7490 // it rounds to zero. | |
7491 const uint32_t zero_exponent = | |
7492 (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift; | |
7493 __ sub(Operand(scratch2), Immediate(zero_exponent)); | |
7494 // ecx already has a Smi zero. | |
7495 __ j(less, &done); | |
7496 | |
7497 // We have a shifted exponent between 0 and 30 in scratch2. | |
7498 __ shr(scratch2, HeapNumber::kExponentShift); | |
7499 __ mov(ecx, Immediate(30)); | |
7500 __ sub(ecx, Operand(scratch2)); | |
7501 | |
7502 __ bind(&right_exponent); | |
7503 // Here ecx is the shift, scratch is the exponent word. | |
7504 // Get the top bits of the mantissa. | |
7505 __ and_(scratch, HeapNumber::kMantissaMask); | |
7506 // Put back the implicit 1. | |
7507 __ or_(scratch, 1 << HeapNumber::kExponentShift); | |
7508 // Shift up the mantissa bits to take up the space the exponent used to | |
7509 // take. We have kExponentShift + 1 significant bits in the low end of the | |
7510 // word. Shift them to the top bits. | |
7511 const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2; | |
7512 __ shl(scratch, shift_distance); | |
7513 // Get the second half of the double. For some exponents we don't | |
7514 // actually need this because the bits get shifted out again, but | |
7515 // it's probably slower to test than just to do it. | |
7516 __ mov(scratch2, FieldOperand(source, HeapNumber::kMantissaOffset)); | |
7517 // Shift down 22 bits to get the most significant 10 bits or the low mantissa | |
7518 // word. | |
7519 __ shr(scratch2, 32 - shift_distance); | |
7520 __ or_(scratch2, Operand(scratch)); | |
7521 // Move down according to the exponent. | |
7522 __ shr_cl(scratch2); | |
7523 // Now the unsigned answer is in scratch2. We need to move it to ecx and | |
7524 // we may need to fix the sign. | |
7525 Label negative; | |
7526 __ xor_(ecx, Operand(ecx)); | |
7527 __ cmp(ecx, FieldOperand(source, HeapNumber::kExponentOffset)); | |
7528 __ j(greater, &negative); | |
7529 __ mov(ecx, scratch2); | |
7530 __ jmp(&done); | |
7531 __ bind(&negative); | |
7532 __ sub(ecx, Operand(scratch2)); | |
7533 __ bind(&done); | |
7534 } | 7554 } |
7535 | 7555 |
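Editor's note: the comments in IntegerConvert above describe the non-SSE3 conversion entirely in terms of the exponent and mantissa fields. For readers following the register-level code, here is a portable C++ restatement of that bit twiddling. It is an illustrative sketch, not V8 code; the one behavioral difference is that it returns 0 for NaN and the infinities, whereas the stub jumps to conversion_failure and lets the slow path produce the same value.

#include <cstdint>
#include <cstring>

// Model of the non-SSE3 path of IntegerConvert: ToInt32 computed by pure
// integer manipulation of the IEEE 754 bits. Not V8 code.
static int32_t ToInt32ViaBits(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof bits);    // reinterpret the double

  const uint32_t biased_exponent = static_cast<uint32_t>(bits >> 52) & 0x7FF;
  const bool negative = (bits >> 63) != 0;

  if (biased_exponent == 0x7FF) return 0;     // NaN/Inf: the stub bails to its slow path
  if (biased_exponent == 0) return 0;         // zeros and denormals round to 0

  // 52 stored mantissa bits plus the implicit leading 1; the value is
  // mantissa * 2^(biased_exponent - 1075).
  const uint64_t mantissa = (bits & 0x000FFFFFFFFFFFFFULL) | (1ULL << 52);
  const int shift = static_cast<int>(biased_exponent) - 1075;

  uint32_t magnitude;
  if (shift >= 32) {
    magnitude = 0;                            // every set bit lands above bit 31
  } else if (shift >= 0) {
    magnitude = static_cast<uint32_t>(mantissa << shift);
  } else if (shift > -53) {
    magnitude = static_cast<uint32_t>(mantissa >> -shift);
  } else {
    magnitude = 0;                            // |value| < 1 truncates to 0
  }
  // Apply the sign modulo 2^32, as the stub's neg/sub of ecx does.
  return static_cast<int32_t>(negative ? 0u - magnitude : magnitude);
}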
7536 | 7556 |
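Editor's note: the new SSE3 branch reaches the same answer differently. fisttp (the x87 truncating store added with SSE3) converts the value to a 64-bit integer in one instruction, and the stub keeps only the low word, having already rejected any exponent of 63 or more. An assumed-equivalent C++ sketch, again not V8 code:

#include <cstdint>

// Model of the SSE3/fisttp branch of IntegerConvert. The stub has already
// sent any value with |value| >= 2^63 (exponent >= bias + 63) to the slow
// path, so the 64-bit truncation below is always defined.
static int32_t Sse3PathModel(double value) {
  int64_t wide = static_cast<int64_t>(value);               // truncate toward zero
  return static_cast<int32_t>(static_cast<uint32_t>(wide)); // keep the low 32 bits
}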
7537 // Input: edx, eax are the left and right objects of a bit op. | 7557 // Input: edx, eax are the left and right objects of a bit op. |
7538 // Output: eax, ecx are left and right integers for a bit op. | 7558 // Output: eax, ecx are left and right integers for a bit op. |
7539 void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm, | 7559 void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm, |
| 7560 bool use_sse3, |
7540 Label* conversion_failure) { | 7561 Label* conversion_failure) { |
7541 // Check float operands. | 7562 // Check float operands. |
7542 Label arg1_is_object, arg2_is_object, load_arg2; | 7563 Label arg1_is_object, arg2_is_object, load_arg2; |
7543 Label done; | 7564 Label done; |
7544 | 7565 |
7545 __ test(edx, Immediate(kSmiTagMask)); | 7566 __ test(edx, Immediate(kSmiTagMask)); |
7546 __ j(not_zero, &arg1_is_object); | 7567 __ j(not_zero, &arg1_is_object); |
7547 __ sar(edx, kSmiTagSize); | 7568 __ sar(edx, kSmiTagSize); |
7548 __ jmp(&load_arg2); | 7569 __ jmp(&load_arg2); |
7549 | 7570 |
7550 __ bind(&arg1_is_object); | 7571 __ bind(&arg1_is_object); |
7551 __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset)); | 7572 __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset)); |
7552 __ cmp(ebx, Factory::heap_number_map()); | 7573 __ cmp(ebx, Factory::heap_number_map()); |
7553 __ j(not_equal, conversion_failure); | 7574 __ j(not_equal, conversion_failure); |
7554 // Get the untagged integer version of the edx heap number in ecx. | 7575 // Get the untagged integer version of the edx heap number in ecx. |
7555 IntegerConvert(masm, edx, conversion_failure); | 7576 IntegerConvert(masm, edx, use_sse3, conversion_failure); |
7556 __ mov(edx, ecx); | 7577 __ mov(edx, ecx); |
7557 | 7578 |
7558 // Here edx has the untagged integer, eax has a Smi or a heap number. | 7579 // Here edx has the untagged integer, eax has a Smi or a heap number. |
7559 __ bind(&load_arg2); | 7580 __ bind(&load_arg2); |
7560 // Test if arg2 is a Smi. | 7581 // Test if arg2 is a Smi. |
7561 __ test(eax, Immediate(kSmiTagMask)); | 7582 __ test(eax, Immediate(kSmiTagMask)); |
7562 __ j(not_zero, &arg2_is_object); | 7583 __ j(not_zero, &arg2_is_object); |
7563 __ sar(eax, kSmiTagSize); | 7584 __ sar(eax, kSmiTagSize); |
7564 __ mov(ecx, eax); | 7585 __ mov(ecx, eax); |
7565 __ jmp(&done); | 7586 __ jmp(&done); |
7566 | 7587 |
7567 __ bind(&arg2_is_object); | 7588 __ bind(&arg2_is_object); |
7568 __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset)); | 7589 __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset)); |
7569 __ cmp(ebx, Factory::heap_number_map()); | 7590 __ cmp(ebx, Factory::heap_number_map()); |
7570 __ j(not_equal, conversion_failure); | 7591 __ j(not_equal, conversion_failure); |
7571 // Get the untagged integer version of the eax heap number in ecx. | 7592 // Get the untagged integer version of the eax heap number in ecx. |
7572 IntegerConvert(masm, eax, conversion_failure); | 7593 IntegerConvert(masm, eax, use_sse3, conversion_failure); |
7573 __ bind(&done); | 7594 __ bind(&done); |
7574 __ mov(eax, edx); | 7595 __ mov(eax, edx); |
7575 } | 7596 } |
7576 | 7597 |
7577 | 7598 |
7578 void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm, | 7599 void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm, |
7579 Register number) { | 7600 Register number) { |
7580 Label load_smi, done; | 7601 Label load_smi, done; |
7581 | 7602 |
7582 __ test(number, Immediate(kSmiTagMask)); | 7603 __ test(number, Immediate(kSmiTagMask)); |
(...skipping 1329 matching lines...)
8912 __ add(Operand(dest), Immediate(2)); | 8933 __ add(Operand(dest), Immediate(2)); |
8913 } | 8934 } |
8914 __ sub(Operand(count), Immediate(1)); | 8935 __ sub(Operand(count), Immediate(1)); |
8915 __ j(not_zero, &loop); | 8936 __ j(not_zero, &loop); |
8916 } | 8937 } |
8917 | 8938 |
8918 | 8939 |
8919 #undef __ | 8940 #undef __ |
8920 | 8941 |
8921 } } // namespace v8::internal | 8942 } } // namespace v8::internal |