OLD | NEW |
1 // Copyright 2006-2009 the V8 project authors. All rights reserved. | 1 // Copyright 2006-2009 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 4584 matching lines...) |
4595 } | 4595 } |
4596 } | 4596 } |
4597 __ mov(pc, Operand(lr)); // Return. | 4597 __ mov(pc, Operand(lr)); // Return. |
4598 } | 4598 } |
4599 // No fall through here. | 4599 // No fall through here. |
4600 | 4600 |
4601 __ bind(&not_identical); | 4601 __ bind(&not_identical); |
4602 } | 4602 } |
4603 | 4603 |
4604 | 4604 |
| 4605 static void IntegerToDoubleConversionWithVFP3(MacroAssembler* masm, |
| 4606 Register inReg, |
| 4607 Register outHighReg, |
| 4608 Register outLowReg) { |
| 4609 // ARMv7 VFP3 instructions to implement integer to double conversion. |
| 4610 // This VFP3 implementation is known to work |
| 4611 // on the ARMv7-VFP3 Snapdragon processor. |
| 4612 |
| 4613 __ mov(r7, Operand(inReg, ASR, kSmiTagSize)); |
| 4614 __ fmsr(s15, r7); |
| 4615 __ fsitod(d7, s15); |
| 4616 __ fmrrd(outLowReg, outHighReg, d7); |
| 4617 } |
| 4618 |
| 4619 |
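Note on the helper above (illustrative, not part of the patch): it untags the Smi with an arithmetic shift right by kSmiTagSize, moves the integer into a VFP single register, converts it to double, and copies the two 32-bit halves back to core registers. A rough C++ sketch of the computed result follows; it assumes a 1-bit Smi tag and that fmrrd yields the low word first.

#include <cstdint>
#include <cstring>

// Sketch only: what the fmsr/fsitod/fmrrd sequence produces for a tagged Smi.
static void IntegerToDoubleSketch(int32_t tagged_smi,
                                  uint32_t* out_high, uint32_t* out_low) {
  double d = static_cast<double>(tagged_smi >> 1);  // Untag (assumes kSmiTagSize == 1).
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof bits);
  *out_low = static_cast<uint32_t>(bits);           // Mantissa low word (-> outLowReg).
  *out_high = static_cast<uint32_t>(bits >> 32);    // Sign, exponent, mantissa high (-> outHighReg).
}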
4605 // See comment at call site. | 4620 // See comment at call site. |
4606 static void EmitSmiNonsmiComparison(MacroAssembler* masm, | 4621 static void EmitSmiNonsmiComparison(MacroAssembler* masm, |
4607 Label* rhs_not_nan, | 4622 Label* rhs_not_nan, |
4608 Label* slow, | 4623 Label* slow, |
4609 bool strict) { | 4624 bool strict) { |
4610 Label lhs_is_smi; | 4625 Label lhs_is_smi; |
4611 __ tst(r0, Operand(kSmiTagMask)); | 4626 __ tst(r0, Operand(kSmiTagMask)); |
4612 __ b(eq, &lhs_is_smi); | 4627 __ b(eq, &lhs_is_smi); |
4613 | 4628 |
4614 // Rhs is a Smi. Check whether the non-smi is a heap number. | 4629 // Rhs is a Smi. Check whether the non-smi is a heap number. |
4615 __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE); | 4630 __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE); |
4616 if (strict) { | 4631 if (strict) { |
4617 // If lhs was not a number and rhs was a Smi then strict equality cannot | 4632 // If lhs was not a number and rhs was a Smi then strict equality cannot |
4618 // succeed. Return non-equal (r0 is already not zero) | 4633 // succeed. Return non-equal (r0 is already not zero) |
4619 __ mov(pc, Operand(lr), LeaveCC, ne); // Return. | 4634 __ mov(pc, Operand(lr), LeaveCC, ne); // Return. |
4620 } else { | 4635 } else { |
4621 // Smi compared non-strictly with a non-Smi non-heap-number. Call | 4636 // Smi compared non-strictly with a non-Smi non-heap-number. Call |
4622 // the runtime. | 4637 // the runtime. |
4623 __ b(ne, slow); | 4638 __ b(ne, slow); |
4624 } | 4639 } |
4625 | 4640 |
4626 // Rhs is a smi, lhs is a number. | 4641 // Rhs is a smi, lhs is a number. |
4627 __ push(lr); | 4642 __ push(lr); |
4628 __ mov(r7, Operand(r1)); | 4643 |
4629 ConvertToDoubleStub stub1(r3, r2, r7, r6); | 4644 if (CpuFeatures::IsSupported(CpuFeatures::VFP3)) { |
4630 __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET); | 4645 IntegerToDoubleConversionWithVFP3(masm, r1, r3, r2); |
| 4646 } else { |
| 4647 __ mov(r7, Operand(r1)); |
| 4648 ConvertToDoubleStub stub1(r3, r2, r7, r6); |
| 4649 __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET); |
| 4650 } |
| 4651 |
| 4652 |
4631 // r3 and r2 are rhs as double. | 4653 // r3 and r2 are rhs as double. |
4632 __ ldr(r1, FieldMemOperand(r0, HeapNumber::kValueOffset + kPointerSize)); | 4654 __ ldr(r1, FieldMemOperand(r0, HeapNumber::kValueOffset + kPointerSize)); |
4633 __ ldr(r0, FieldMemOperand(r0, HeapNumber::kValueOffset)); | 4655 __ ldr(r0, FieldMemOperand(r0, HeapNumber::kValueOffset)); |
4634 // We now have both loaded as doubles but we can skip the lhs nan check | 4656 // We now have both loaded as doubles but we can skip the lhs nan check |
4635 // since it's a Smi. | 4657 // since it's a Smi. |
4636 __ pop(lr); | 4658 __ pop(lr); |
4637 __ jmp(rhs_not_nan); | 4659 __ jmp(rhs_not_nan); |
4638 | 4660 |
4639 __ bind(&lhs_is_smi); | 4661 __ bind(&lhs_is_smi); |
4640 // Lhs is a Smi. Check whether the non-smi is a heap number. | 4662 // Lhs is a Smi. Check whether the non-smi is a heap number. |
4641 __ CompareObjectType(r1, r4, r4, HEAP_NUMBER_TYPE); | 4663 __ CompareObjectType(r1, r4, r4, HEAP_NUMBER_TYPE); |
4642 if (strict) { | 4664 if (strict) { |
4643 // If rhs was not a number and lhs was a Smi then strict equality cannot | 4665 // If rhs was not a number and lhs was a Smi then strict equality cannot |
4644 // succeed. Return non-equal. | 4666 // succeed. Return non-equal. |
4645 __ mov(r0, Operand(1), LeaveCC, ne); // Non-zero indicates not equal. | 4667 __ mov(r0, Operand(1), LeaveCC, ne); // Non-zero indicates not equal. |
4646 __ mov(pc, Operand(lr), LeaveCC, ne); // Return. | 4668 __ mov(pc, Operand(lr), LeaveCC, ne); // Return. |
4647 } else { | 4669 } else { |
4648 // Smi compared non-strictly with a non-Smi non-heap-number. Call | 4670 // Smi compared non-strictly with a non-Smi non-heap-number. Call |
4649 // the runtime. | 4671 // the runtime. |
4650 __ b(ne, slow); | 4672 __ b(ne, slow); |
4651 } | 4673 } |
4652 | 4674 |
4653 // Lhs is a smi, rhs is a number. | 4675 // Lhs is a smi, rhs is a number. |
4654 // r0 is Smi and r1 is heap number. | 4676 // r0 is Smi and r1 is heap number. |
4655 __ push(lr); | 4677 __ push(lr); |
4656 __ ldr(r2, FieldMemOperand(r1, HeapNumber::kValueOffset)); | 4678 __ ldr(r2, FieldMemOperand(r1, HeapNumber::kValueOffset)); |
4657 __ ldr(r3, FieldMemOperand(r1, HeapNumber::kValueOffset + kPointerSize)); | 4679 __ ldr(r3, FieldMemOperand(r1, HeapNumber::kValueOffset + kPointerSize)); |
4658 __ mov(r7, Operand(r0)); | 4680 |
4659 ConvertToDoubleStub stub2(r1, r0, r7, r6); | 4681 if (CpuFeatures::IsSupported(CpuFeatures::VFP3)) { |
4660 __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET); | 4682 IntegerToDoubleConversionWithVFP3(masm, r0, r1, r0); |
| 4683 } else { |
| 4684 __ mov(r7, Operand(r0)); |
| 4685 ConvertToDoubleStub stub2(r1, r0, r7, r6); |
| 4686 __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET); |
| 4687 } |
| 4688 |
4661 __ pop(lr); | 4689 __ pop(lr); |
4662 // Fall through to both_loaded_as_doubles. | 4690 // Fall through to both_loaded_as_doubles. |
4663 } | 4691 } |
4664 | 4692 |
4665 | 4693 |
4666 void EmitNanCheck(MacroAssembler* masm, Label* rhs_not_nan, Condition cc) { | 4694 void EmitNanCheck(MacroAssembler* masm, Label* rhs_not_nan, Condition cc) { |
4667 bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset); | 4695 bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset); |
4668 Register lhs_exponent = exp_first ? r0 : r1; | 4696 Register lhs_exponent = exp_first ? r0 : r1; |
4669 Register rhs_exponent = exp_first ? r2 : r3; | 4697 Register rhs_exponent = exp_first ? r2 : r3; |
4670 Register lhs_mantissa = exp_first ? r1 : r0; | 4698 Register lhs_mantissa = exp_first ? r1 : r0; |
(...skipping 182 matching lines...) |
4853 EmitSmiNonsmiComparison(masm, &rhs_not_nan, &slow, strict_); | 4881 EmitSmiNonsmiComparison(masm, &rhs_not_nan, &slow, strict_); |
4854 | 4882 |
4855 __ bind(&both_loaded_as_doubles); | 4883 __ bind(&both_loaded_as_doubles); |
4856 // r0, r1, r2, r3 are the double representations of the left hand side | 4884 // r0, r1, r2, r3 are the double representations of the left hand side |
4857 // and the right hand side. | 4885 // and the right hand side. |
4858 | 4886 |
4859 // Checks for NaN in the doubles we have loaded. Can return the answer or | 4887 // Checks for NaN in the doubles we have loaded. Can return the answer or |
4860 // fall through if neither is a NaN. Also binds rhs_not_nan. | 4888 // fall through if neither is a NaN. Also binds rhs_not_nan. |
4861 EmitNanCheck(masm, &rhs_not_nan, cc_); | 4889 EmitNanCheck(masm, &rhs_not_nan, cc_); |
4862 | 4890 |
4863 // Compares two doubles in r0, r1, r2, r3 that are not NaNs. Returns the | 4891 if (CpuFeatures::IsSupported(CpuFeatures::VFP3)) { |
4864 // answer. Never falls through. | 4892 // ARMv7 VFP3 instructions to implement double precision comparison. |
4865 EmitTwoNonNanDoubleComparison(masm, cc_); | 4893 // This VFP3 implementation is known to work on |
| 4894 // the ARMv7-VFP3 Snapdragon processor. |
| 4895 |
| 4896 __ fmdrr(d6, r0, r1); |
| 4897 __ fmdrr(d7, r2, r3); |
| 4898 |
| 4899 __ fcmp(d6, d7); |
| 4900 __ vmrs(pc); |
| 4901 __ mov(r0, Operand(0), LeaveCC, eq); |
| 4902 __ mov(r0, Operand(1), LeaveCC, lt); |
| 4903 __ mvn(r0, Operand(0), LeaveCC, gt); |
| 4904 __ mov(pc, Operand(lr)); |
| 4905 } else { |
| 4906 // Compares two doubles in r0, r1, r2, r3 that are not NaNs. Returns the |
| 4907 // answer. Never falls through. |
| 4908 EmitTwoNonNanDoubleComparison(masm, cc_); |
| 4909 } |
4866 | 4910 |
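Background on the VFP3 branch above (sketch, not part of the patch): fcmp sets the FPSCR condition flags and vmrs(pc) (FMSTAT) copies them into the APSR, so the ordinary eq/lt/gt conditional moves can pick the stub's result directly. A hedged C++ sketch of that selection follows; 'first' and 'second' simply stand for the doubles assembled from the r0/r1 and r2/r3 pairs, and NaNs are assumed to have been filtered out by EmitNanCheck.

// Illustrative sketch only: the value the conditional moves leave in r0.
static int SelectComparisonResult(double first, double second) {
  if (first == second) return 0;   // eq path:  mov r0, #0
  if (first < second) return 1;    // lt path:  mov r0, #1
  return -1;                       // gt path:  mvn r0, #0  (all ones, i.e. -1)
}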
4867 __ bind(&not_smis); | 4911 __ bind(&not_smis); |
4868 // At this point we know we are dealing with two different objects, | 4912 // At this point we know we are dealing with two different objects, |
4869 // and neither of them is a Smi. The objects are in r0 and r1. | 4913 // and neither of them is a Smi. The objects are in r0 and r1. |
4870 if (strict_) { | 4914 if (strict_) { |
4871 // This returns non-equal for some object types, or falls through if it | 4915 // This returns non-equal for some object types, or falls through if it |
4872 // was not lucky. | 4916 // was not lucky. |
4873 EmitStrictTwoHeapObjectCompare(masm); | 4917 EmitStrictTwoHeapObjectCompare(masm); |
4874 } | 4918 } |
4875 | 4919 |
(...skipping 79 matching lines...) |
4955 Label* not_smi, | 4999 Label* not_smi, |
4956 const Builtins::JavaScript& builtin, | 5000 const Builtins::JavaScript& builtin, |
4957 Token::Value operation, | 5001 Token::Value operation, |
4958 OverwriteMode mode) { | 5002 OverwriteMode mode) { |
4959 Label slow, slow_pop_2_first, do_the_call; | 5003 Label slow, slow_pop_2_first, do_the_call; |
4960 Label r0_is_smi, r1_is_smi, finished_loading_r0, finished_loading_r1; | 5004 Label r0_is_smi, r1_is_smi, finished_loading_r0, finished_loading_r1; |
4961 // Smi-smi case (overflow). | 5005 // Smi-smi case (overflow). |
4962 // Since both are Smis there is no heap number to overwrite, so allocate. | 5006 // Since both are Smis there is no heap number to overwrite, so allocate. |
4963 // The new heap number is in r5. r6 and r7 are scratch. | 5007 // The new heap number is in r5. r6 and r7 are scratch. |
4964 AllocateHeapNumber(masm, &slow, r5, r6, r7); | 5008 AllocateHeapNumber(masm, &slow, r5, r6, r7); |
4965 // Write Smi from r0 to r3 and r2 in double format. r6 is scratch. | 5009 |
4966 __ mov(r7, Operand(r0)); | 5010 if (CpuFeatures::IsSupported(CpuFeatures::VFP3)) { |
4967 ConvertToDoubleStub stub1(r3, r2, r7, r6); | 5011 IntegerToDoubleConversionWithVFP3(masm, r0, r3, r2); |
4968 __ push(lr); | 5012 IntegerToDoubleConversionWithVFP3(masm, r1, r1, r0); |
4969 __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET); | 5013 } else { |
4970 // Write Smi from r1 to r1 and r0 in double format. r6 is scratch. | 5014 // Write Smi from r0 to r3 and r2 in double format. r6 is scratch. |
4971 __ mov(r7, Operand(r1)); | 5015 __ mov(r7, Operand(r0)); |
4972 ConvertToDoubleStub stub2(r1, r0, r7, r6); | 5016 ConvertToDoubleStub stub1(r3, r2, r7, r6); |
4973 __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET); | 5017 __ push(lr); |
4974 __ pop(lr); | 5018 __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET); |
| 5019 // Write Smi from r1 to r1 and r0 in double format. r6 is scratch. |
| 5020 __ mov(r7, Operand(r1)); |
| 5021 ConvertToDoubleStub stub2(r1, r0, r7, r6); |
| 5022 __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET); |
| 5023 __ pop(lr); |
| 5024 } |
| 5025 |
4975 __ jmp(&do_the_call); // Tail call. No return. | 5026 __ jmp(&do_the_call); // Tail call. No return. |
4976 | 5027 |
4977 // We jump to here if something goes wrong (one param is not a number of any | 5028 // We jump to here if something goes wrong (one param is not a number of any |
4978 // sort or new-space allocation fails). | 5029 // sort or new-space allocation fails). |
4979 __ bind(&slow); | 5030 __ bind(&slow); |
4980 __ push(r1); | 5031 __ push(r1); |
4981 __ push(r0); | 5032 __ push(r0); |
4982 __ mov(r0, Operand(1)); // Set number of arguments. | 5033 __ mov(r0, Operand(1)); // Set number of arguments. |
4983 __ InvokeBuiltin(builtin, JUMP_JS); // Tail call. No return. | 5034 __ InvokeBuiltin(builtin, JUMP_JS); // Tail call. No return. |
4984 | 5035 |
(...skipping 15 matching lines...) |
5000 } | 5051 } |
5001 // Calling convention says that second double is in r2 and r3. | 5052 // Calling convention says that second double is in r2 and r3. |
5002 __ ldr(r2, FieldMemOperand(r0, HeapNumber::kValueOffset)); | 5053 __ ldr(r2, FieldMemOperand(r0, HeapNumber::kValueOffset)); |
5003 __ ldr(r3, FieldMemOperand(r0, HeapNumber::kValueOffset + 4)); | 5054 __ ldr(r3, FieldMemOperand(r0, HeapNumber::kValueOffset + 4)); |
5004 __ jmp(&finished_loading_r0); | 5055 __ jmp(&finished_loading_r0); |
5005 __ bind(&r0_is_smi); | 5056 __ bind(&r0_is_smi); |
5006 if (mode == OVERWRITE_RIGHT) { | 5057 if (mode == OVERWRITE_RIGHT) { |
5007 // We can't overwrite a Smi so get address of new heap number into r5. | 5058 // We can't overwrite a Smi so get address of new heap number into r5. |
5008 AllocateHeapNumber(masm, &slow, r5, r6, r7); | 5059 AllocateHeapNumber(masm, &slow, r5, r6, r7); |
5009 } | 5060 } |
5010 // Write Smi from r0 to r3 and r2 in double format. | 5061 |
5011 __ mov(r7, Operand(r0)); | 5062 |
5012 ConvertToDoubleStub stub3(r3, r2, r7, r6); | 5063 if (CpuFeatures::IsSupported(CpuFeatures::VFP3)) { |
5013 __ push(lr); | 5064 IntegerToDoubleConversionWithVFP3(masm, r0, r3, r2); |
5014 __ Call(stub3.GetCode(), RelocInfo::CODE_TARGET); | 5065 } else { |
5015 __ pop(lr); | 5066 // Write Smi from r0 to r3 and r2 in double format. |
| 5067 __ mov(r7, Operand(r0)); |
| 5068 ConvertToDoubleStub stub3(r3, r2, r7, r6); |
| 5069 __ push(lr); |
| 5070 __ Call(stub3.GetCode(), RelocInfo::CODE_TARGET); |
| 5071 __ pop(lr); |
| 5072 } |
| 5073 |
5016 __ bind(&finished_loading_r0); | 5074 __ bind(&finished_loading_r0); |
5017 | 5075 |
5018 // Move r1 to a double in r0-r1. | 5076 // Move r1 to a double in r0-r1. |
5019 __ tst(r1, Operand(kSmiTagMask)); | 5077 __ tst(r1, Operand(kSmiTagMask)); |
5020 __ b(eq, &r1_is_smi); // It's a Smi so don't check it's a heap number. | 5078 __ b(eq, &r1_is_smi); // It's a Smi so don't check it's a heap number. |
5021 __ CompareObjectType(r1, r4, r4, HEAP_NUMBER_TYPE); | 5079 __ CompareObjectType(r1, r4, r4, HEAP_NUMBER_TYPE); |
5022 __ b(ne, &slow); | 5080 __ b(ne, &slow); |
5023 if (mode == OVERWRITE_LEFT) { | 5081 if (mode == OVERWRITE_LEFT) { |
5024 __ mov(r5, Operand(r1)); // Overwrite this heap number. | 5082 __ mov(r5, Operand(r1)); // Overwrite this heap number. |
5025 } | 5083 } |
5026 // Calling convention says that first double is in r0 and r1. | 5084 // Calling convention says that first double is in r0 and r1. |
5027 __ ldr(r0, FieldMemOperand(r1, HeapNumber::kValueOffset)); | 5085 __ ldr(r0, FieldMemOperand(r1, HeapNumber::kValueOffset)); |
5028 __ ldr(r1, FieldMemOperand(r1, HeapNumber::kValueOffset + 4)); | 5086 __ ldr(r1, FieldMemOperand(r1, HeapNumber::kValueOffset + 4)); |
5029 __ jmp(&finished_loading_r1); | 5087 __ jmp(&finished_loading_r1); |
5030 __ bind(&r1_is_smi); | 5088 __ bind(&r1_is_smi); |
5031 if (mode == OVERWRITE_LEFT) { | 5089 if (mode == OVERWRITE_LEFT) { |
5032 // We can't overwrite a Smi so get address of new heap number into r5. | 5090 // We can't overwrite a Smi so get address of new heap number into r5. |
5033 AllocateHeapNumber(masm, &slow, r5, r6, r7); | 5091 AllocateHeapNumber(masm, &slow, r5, r6, r7); |
5034 } | 5092 } |
5035 // Write Smi from r1 to r1 and r0 in double format. | 5093 |
5036 __ mov(r7, Operand(r1)); | 5094 if (CpuFeatures::IsSupported(CpuFeatures::VFP3)) { |
5037 ConvertToDoubleStub stub4(r1, r0, r7, r6); | 5095 IntegerToDoubleConversionWithVFP3(masm, r1, r1, r0); |
5038 __ push(lr); | 5096 } else { |
5039 __ Call(stub4.GetCode(), RelocInfo::CODE_TARGET); | 5097 // Write Smi from r1 to r1 and r0 in double format. |
5040 __ pop(lr); | 5098 __ mov(r7, Operand(r1)); |
| 5099 ConvertToDoubleStub stub4(r1, r0, r7, r6); |
| 5100 __ push(lr); |
| 5101 __ Call(stub4.GetCode(), RelocInfo::CODE_TARGET); |
| 5102 __ pop(lr); |
| 5103 } |
| 5104 |
5041 __ bind(&finished_loading_r1); | 5105 __ bind(&finished_loading_r1); |
5042 | 5106 |
5043 __ bind(&do_the_call); | 5107 __ bind(&do_the_call); |
5044 // r0: Left value (least significant part of mantissa). | 5108 // r0: Left value (least significant part of mantissa). |
5045 // r1: Left value (sign, exponent, top of mantissa). | 5109 // r1: Left value (sign, exponent, top of mantissa). |
5046 // r2: Right value (least significant part of mantissa). | 5110 // r2: Right value (least significant part of mantissa). |
5047 // r3: Right value (sign, exponent, top of mantissa). | 5111 // r3: Right value (sign, exponent, top of mantissa). |
5048 // r5: Address of heap number for result. | 5112 // r5: Address of heap number for result. |
| 5113 |
| 5114 if (CpuFeatures::IsSupported(CpuFeatures::VFP3) && |
| 5115 ((Token::MUL == operation) || |
| 5116 (Token::DIV == operation) || |
| 5117 (Token::ADD == operation) || |
| 5118 (Token::SUB == operation))) { |
| 5119 // ARMv7 VFP3 instructions to implement |
| 5120 // double precision add, subtract, multiply, and divide. |
| 5121 // This VFP3 implementation is known to work on |
| 5122 // the ARMv7-VFP3 Snapdragon processor. |
| 5123 |
| 5124 __ fmdrr(d6, r0, r1); |
| 5125 __ fmdrr(d7, r2, r3); |
| 5126 |
| 5127 if (Token::MUL == operation) __ fmuld(d5, d6, d7); |
| 5128 else if (Token::DIV == operation) __ fdivd(d5, d6, d7); |
| 5129 else if (Token::ADD == operation) __ faddd(d5, d6, d7); |
| 5130 else if (Token::SUB == operation) __ fsubd(d5, d6, d7); |
| 5131 |
| 5132 __ fmrrd(r0, r1, d5); |
| 5133 |
| 5134 __ str(r0, FieldMemOperand(r5, HeapNumber::kValueOffset)); |
| 5135 __ str(r1, FieldMemOperand(r5, HeapNumber::kValueOffset + 4)); |
| 5136 __ mov(r0, Operand(r5)); |
| 5137 __ mov(pc, Operand(lr)); // Return. |
| 5138 return; |
| 5139 } |
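The VFP3 fast path above reassembles the two operands from core-register pairs, performs the operation in d5, splits the result with fmrrd, and stores the two words into the pre-allocated heap number before returning its address in r0. A self-contained C++ sketch of that data flow (illustrative only; names are made up and a char opcode stands in for Token::Value):

#include <cstdint>
#include <cstring>

// Sketch only: result = left <op> right, split into the words fmrrd returns.
static void VfpBinaryOpSketch(double left, double right, char op,
                              uint32_t* out_low, uint32_t* out_high) {
  double result = 0.0;
  switch (op) {
    case '+': result = left + right; break;
    case '-': result = left - right; break;
    case '*': result = left * right; break;
    case '/': result = left / right; break;
  }
  uint64_t bits;
  std::memcpy(&bits, &result, sizeof bits);
  *out_low = static_cast<uint32_t>(bits);         // -> r0, stored at kValueOffset.
  *out_high = static_cast<uint32_t>(bits >> 32);  // -> r1, stored at kValueOffset + 4.
}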
5049 __ push(lr); // For later. | 5140 __ push(lr); // For later. |
5050 __ push(r5); // Address of heap number that is answer. | 5141 __ push(r5); // Address of heap number that is answer. |
5051 __ AlignStack(0); | 5142 __ AlignStack(0); |
5052 // Call C routine that may not cause GC or other trouble. | 5143 // Call C routine that may not cause GC or other trouble. |
5053 __ mov(r5, Operand(ExternalReference::double_fp_operation(operation))); | 5144 __ mov(r5, Operand(ExternalReference::double_fp_operation(operation))); |
5054 __ Call(r5); | 5145 __ Call(r5); |
5055 __ pop(r4); // Address of heap number. | 5146 __ pop(r4); // Address of heap number. |
5056 __ cmp(r4, Operand(Smi::FromInt(0))); | 5147 __ cmp(r4, Operand(Smi::FromInt(0))); |
5057 __ pop(r4, eq); // Conditional pop instruction to get rid of alignment push. | 5148 __ pop(r4, eq); // Conditional pop instruction to get rid of alignment push. |
5058 // Store answer in the overwritable heap number. | 5149 // Store answer in the overwritable heap number. |
(...skipping 48 matching lines...) |
5107 __ b(gt, slow); | 5198 __ b(gt, slow); |
5108 | 5199 |
5109 // We know the exponent is smaller than 30 (biased). If it is less than | 5200 // We know the exponent is smaller than 30 (biased). If it is less than |
5110 // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, ie | 5201 // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, ie |
5111 // it rounds to zero. | 5202 // it rounds to zero. |
5112 const uint32_t zero_exponent = | 5203 const uint32_t zero_exponent = |
5113 (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift; | 5204 (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift; |
5114 __ sub(scratch2, scratch2, Operand(zero_exponent), SetCC); | 5205 __ sub(scratch2, scratch2, Operand(zero_exponent), SetCC); |
5115 // Dest already has a Smi zero. | 5206 // Dest already has a Smi zero. |
5116 __ b(lt, &done); | 5207 __ b(lt, &done); |
5117 // We have a shifted exponent between 0 and 30 in scratch2. | 5208 if (!CpuFeatures::IsSupported(CpuFeatures::VFP3)) { |
5118 __ mov(dest, Operand(scratch2, LSR, HeapNumber::kExponentShift)); | 5209 // We have a shifted exponent between 0 and 30 in scratch2. |
5119 // We now have the exponent in dest. Subtract from 30 to get | 5210 __ mov(dest, Operand(scratch2, LSR, HeapNumber::kExponentShift)); |
5120 // how much to shift down. | 5211 // We now have the exponent in dest. Subtract from 30 to get |
5121 __ rsb(dest, dest, Operand(30)); | 5212 // how much to shift down. |
5122 | 5213 __ rsb(dest, dest, Operand(30)); |
| 5214 } |
5123 __ bind(&right_exponent); | 5215 __ bind(&right_exponent); |
5124 // Get the top bits of the mantissa. | 5216 if (CpuFeatures::IsSupported(CpuFeatures::VFP3)) { |
5125 __ and_(scratch2, scratch, Operand(HeapNumber::kMantissaMask)); | 5217 // ARMv7 VFP3 instructions implementing double precision to integer |
5126 // Put back the implicit 1. | 5218 // conversion using round to zero. |
5127 __ orr(scratch2, scratch2, Operand(1 << HeapNumber::kExponentShift)); | 5219 // This VFP3 implementation is known to work on |
5128 // Shift up the mantissa bits to take up the space the exponent used to take. | 5220 // the ARMv7-VFP3 Snapdragon processor. |
5129 // We just orred in the implicit bit so that took care of one and we want to | 5221 __ ldr(scratch2, FieldMemOperand(source, HeapNumber::kMantissaOffset)); |
5130 // leave the sign bit 0 so we subtract 2 bits from the shift distance. | 5222 __ fmdrr(d7, scratch2, scratch); |
5131 const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2; | 5223 __ ftosid(s15, d7); |
5132 __ mov(scratch2, Operand(scratch2, LSL, shift_distance)); | 5224 __ fmrs(dest, s15); |
5133 // Put sign in zero flag. | 5225 } else { |
5134 __ tst(scratch, Operand(HeapNumber::kSignMask)); | 5226 // Get the top bits of the mantissa. |
5135 // Get the second half of the double. For some exponents we don't actually | 5227 __ and_(scratch2, scratch, Operand(HeapNumber::kMantissaMask)); |
5136 // need this because the bits get shifted out again, but it's probably slower | 5228 // Put back the implicit 1. |
5137 // to test than just to do it. | 5229 __ orr(scratch2, scratch2, Operand(1 << HeapNumber::kExponentShift)); |
5138 __ ldr(scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset)); | 5230 // Shift up the mantissa bits to take up the space the exponent used to |
5139 // Shift down 22 bits to get the last 10 bits. | 5231 // take. We just orred in the implicit bit so that took care of one and |
5140 __ orr(scratch, scratch2, Operand(scratch, LSR, 32 - shift_distance)); | 5232 // we want to leave the sign bit 0 so we subtract 2 bits from the shift |
5141 // Move down according to the exponent. | 5233 // distance. |
5142 __ mov(dest, Operand(scratch, LSR, dest)); | 5234 const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2; |
5143 // Fix sign if sign bit was set. | 5235 __ mov(scratch2, Operand(scratch2, LSL, shift_distance)); |
5144 __ rsb(dest, dest, Operand(0), LeaveCC, ne); | 5236 // Put sign in zero flag. |
| 5237 __ tst(scratch, Operand(HeapNumber::kSignMask)); |
| 5238 // Get the second half of the double. For some exponents we don't |
| 5239 // actually need this because the bits get shifted out again, but |
| 5240 // it's probably slower to test than just to do it. |
| 5241 __ ldr(scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset)); |
| 5242 // Shift down 22 bits to get the last 10 bits. |
| 5243 __ orr(scratch, scratch2, Operand(scratch, LSR, 32 - shift_distance)); |
| 5244 // Move down according to the exponent. |
| 5245 __ mov(dest, Operand(scratch, LSR, dest)); |
| 5246 // Fix sign if sign bit was set. |
| 5247 __ rsb(dest, dest, Operand(0), LeaveCC, ne); |
| 5248 } |
5145 __ bind(&done); | 5249 __ bind(&done); |
5146 } | 5250 } |
5147 | 5251 |
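Both branches of GetInt32 are intended to implement the same contract: truncation toward zero for doubles whose exponent passes the range checks above (larger exponents already branched to the slow label). A rough C++ sketch of the manual mantissa/exponent path, using IEEE 754 double layout; the constants and names here are for illustration only:

#include <cstdint>

// Sketch only: rebuild the integer from the exponent/mantissa words,
// truncating toward zero (mirrors the shifting done in the non-VFP3 path).
static int32_t DoubleWordsToInt32Sketch(uint32_t high, uint32_t low) {
  const int kBias = 1023;
  int exponent = static_cast<int>((high >> 20) & 0x7FF) - kBias;
  if (exponent < 0) return 0;  // |value| < 1 rounds to zero.
  // 52-bit mantissa with the implicit leading 1 restored.
  uint64_t mantissa = (static_cast<uint64_t>(high & 0xFFFFF) << 32) | low;
  mantissa |= static_cast<uint64_t>(1) << 52;
  // Keep only the integer part: shift so that bit 52 lands at 'exponent'.
  uint32_t magnitude = static_cast<uint32_t>(mantissa >> (52 - exponent));
  return (high & 0x80000000u) ? -static_cast<int32_t>(magnitude)
                              : static_cast<int32_t>(magnitude);
}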
5148 | |
5149 // For bitwise ops where the inputs are not both Smis we here try to determine | 5252 // For bitwise ops where the inputs are not both Smis we here try to determine |
5150 // whether both inputs are either Smis or at least heap numbers that can be | 5253 // whether both inputs are either Smis or at least heap numbers that can be |
5151 // represented by a 32 bit signed value. We truncate towards zero as required | 5254 // represented by a 32 bit signed value. We truncate towards zero as required |
5152 // by the ES spec. If this is the case we do the bitwise op and see if the | 5255 // by the ES spec. If this is the case we do the bitwise op and see if the |
5153 // result is a Smi. If so, great, otherwise we try to find a heap number to | 5256 // result is a Smi. If so, great, otherwise we try to find a heap number to |
5154 // write the answer into (either by allocating or by overwriting). | 5257 // write the answer into (either by allocating or by overwriting). |
5155 // On entry the operands are in r0 and r1. On exit the answer is in r0. | 5258 // On entry the operands are in r0 and r1. On exit the answer is in r0. |
5156 void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm) { | 5259 void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm) { |
5157 Label slow, result_not_a_smi; | 5260 Label slow, result_not_a_smi; |
5158 Label r0_is_smi, r1_is_smi; | 5261 Label r0_is_smi, r1_is_smi; |
5159 Label done_checking_r0, done_checking_r1; | 5262 Label done_checking_r0, done_checking_r1; |
5160 | 5263 |
5161 __ tst(r1, Operand(kSmiTagMask)); | 5264 __ tst(r1, Operand(kSmiTagMask)); |
5162 __ b(eq, &r1_is_smi); // It's a Smi so don't check it's a heap number. | 5265 __ b(eq, &r1_is_smi); // It's a Smi so don't check it's a heap number. |
5163 __ CompareObjectType(r1, r4, r4, HEAP_NUMBER_TYPE); | 5266 __ CompareObjectType(r1, r4, r4, HEAP_NUMBER_TYPE); |
5164 __ b(ne, &slow); | 5267 __ b(ne, &slow); |
5165 GetInt32(masm, r1, r3, r4, r5, &slow); | 5268 GetInt32(masm, r1, r3, r5, r4, &slow); |
5166 __ jmp(&done_checking_r1); | 5269 __ jmp(&done_checking_r1); |
5167 __ bind(&r1_is_smi); | 5270 __ bind(&r1_is_smi); |
5168 __ mov(r3, Operand(r1, ASR, 1)); | 5271 __ mov(r3, Operand(r1, ASR, 1)); |
5169 __ bind(&done_checking_r1); | 5272 __ bind(&done_checking_r1); |
5170 | 5273 |
5171 __ tst(r0, Operand(kSmiTagMask)); | 5274 __ tst(r0, Operand(kSmiTagMask)); |
5172 __ b(eq, &r0_is_smi); // It's a Smi so don't check it's a heap number. | 5275 __ b(eq, &r0_is_smi); // It's a Smi so don't check it's a heap number. |
5173 __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE); | 5276 __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE); |
5174 __ b(ne, &slow); | 5277 __ b(ne, &slow); |
5175 GetInt32(masm, r0, r2, r4, r5, &slow); | 5278 GetInt32(masm, r0, r2, r5, r4, &slow); |
5176 __ jmp(&done_checking_r0); | 5279 __ jmp(&done_checking_r0); |
5177 __ bind(&r0_is_smi); | 5280 __ bind(&r0_is_smi); |
5178 __ mov(r2, Operand(r0, ASR, 1)); | 5281 __ mov(r2, Operand(r0, ASR, 1)); |
5179 __ bind(&done_checking_r0); | 5282 __ bind(&done_checking_r0); |
5180 | 5283 |
5181 // r0 and r1: Original operands (Smi or heap numbers). | 5284 // r0 and r1: Original operands (Smi or heap numbers). |
5182 // r2 and r3: Signed int32 operands. | 5285 // r2 and r3: Signed int32 operands. |
5183 switch (op_) { | 5286 switch (op_) { |
5184 case Token::BIT_OR: __ orr(r2, r2, Operand(r3)); break; | 5287 case Token::BIT_OR: __ orr(r2, r2, Operand(r3)); break; |
5185 case Token::BIT_XOR: __ eor(r2, r2, Operand(r3)); break; | 5288 case Token::BIT_XOR: __ eor(r2, r2, Operand(r3)); break; |
(...skipping 1057 matching lines...) |
6243 int CompareStub::MinorKey() { | 6346 int CompareStub::MinorKey() { |
6244 // Encode the two parameters in a unique 16 bit value. | 6347 // Encode the two parameters in a unique 16 bit value. |
6245 ASSERT(static_cast<unsigned>(cc_) >> 28 < (1 << 15)); | 6348 ASSERT(static_cast<unsigned>(cc_) >> 28 < (1 << 15)); |
6246 return (static_cast<unsigned>(cc_) >> 27) | (strict_ ? 1 : 0); | 6349 return (static_cast<unsigned>(cc_) >> 27) | (strict_ ? 1 : 0); |
6247 } | 6350 } |
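ARM condition codes occupy bits 31-28 of the Condition value, so cc_ >> 27 leaves bit 0 free for the strict flag and the whole key stays well inside 16 bits, which is what the ASSERT checks. A tiny worked sketch (illustrative, not part of the patch):

// Example: lt is encoded as 0xB0000000, so a strict 'lt' compare yields
// (0xB0000000 >> 27) | 1 == 0x16 | 1 == 0x17.
static int MinorKeySketch(unsigned condition_bits, bool strict) {
  return static_cast<int>(condition_bits >> 27) | (strict ? 1 : 0);
}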
6248 | 6351 |
6249 | 6352 |
6250 #undef __ | 6353 #undef __ |
6251 | 6354 |
6252 } } // namespace v8::internal | 6355 } } // namespace v8::internal |