| OLD | NEW |
| 1 // Copyright 2006-2009 the V8 project authors. All rights reserved. | 1 // Copyright 2006-2009 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 4605 matching lines...) |
| 4616 __ mov(pc, Operand(lr), LeaveCC, ne); // Return. | 4616 __ mov(pc, Operand(lr), LeaveCC, ne); // Return. |
| 4617 } else { | 4617 } else { |
| 4618 // Smi compared non-strictly with a non-Smi non-heap-number. Call | 4618 // Smi compared non-strictly with a non-Smi non-heap-number. Call |
| 4619 // the runtime. | 4619 // the runtime. |
| 4620 __ b(ne, slow); | 4620 __ b(ne, slow); |
| 4621 } | 4621 } |
| 4622 | 4622 |
| 4623 // Rhs is a smi, lhs is a number. | 4623 // Rhs is a smi, lhs is a number. |
| 4624 __ push(lr); | 4624 __ push(lr); |
| 4625 | 4625 |
| 4626 if (CpuFeatures::IsSupported(CpuFeatures::VFP3)) { | 4626 if (CpuFeatures::IsSupported(VFP3)) { |
| 4627 CpuFeatures::Scope scope(CpuFeatures::VFP3); | 4627 CpuFeatures::Scope scope(VFP3); |
| 4628 __ IntegerToDoubleConversionWithVFP3(r1, r3, r2); | 4628 __ IntegerToDoubleConversionWithVFP3(r1, r3, r2); |
| 4629 } else { | 4629 } else { |
| 4630 __ mov(r7, Operand(r1)); | 4630 __ mov(r7, Operand(r1)); |
| 4631 ConvertToDoubleStub stub1(r3, r2, r7, r6); | 4631 ConvertToDoubleStub stub1(r3, r2, r7, r6); |
| 4632 __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET); | 4632 __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET); |
| 4633 } | 4633 } |
| 4634 | 4634 |
| 4635 | 4635 |
| 4636 // r3 and r2 are rhs as double. | 4636 // r3 and r2 are rhs as double. |
| 4637 __ ldr(r1, FieldMemOperand(r0, HeapNumber::kValueOffset + kPointerSize)); | 4637 __ ldr(r1, FieldMemOperand(r0, HeapNumber::kValueOffset + kPointerSize)); |
| (...skipping 16 matching lines...) |
| 4654 // the runtime. | 4654 // the runtime. |
| 4655 __ b(ne, slow); | 4655 __ b(ne, slow); |
| 4656 } | 4656 } |
| 4657 | 4657 |
| 4658 // Lhs is a smi, rhs is a number. | 4658 // Lhs is a smi, rhs is a number. |
| 4659 // r0 is Smi and r1 is heap number. | 4659 // r0 is Smi and r1 is heap number. |
| 4660 __ push(lr); | 4660 __ push(lr); |
| 4661 __ ldr(r2, FieldMemOperand(r1, HeapNumber::kValueOffset)); | 4661 __ ldr(r2, FieldMemOperand(r1, HeapNumber::kValueOffset)); |
| 4662 __ ldr(r3, FieldMemOperand(r1, HeapNumber::kValueOffset + kPointerSize)); | 4662 __ ldr(r3, FieldMemOperand(r1, HeapNumber::kValueOffset + kPointerSize)); |
| 4663 | 4663 |
| 4664 if (CpuFeatures::IsSupported(CpuFeatures::VFP3)) { | 4664 if (CpuFeatures::IsSupported(VFP3)) { |
| 4665 CpuFeatures::Scope scope(CpuFeatures::VFP3); | 4665 CpuFeatures::Scope scope(VFP3); |
| 4666 __ IntegerToDoubleConversionWithVFP3(r0, r1, r0); | 4666 __ IntegerToDoubleConversionWithVFP3(r0, r1, r0); |
| 4667 } else { | 4667 } else { |
| 4668 __ mov(r7, Operand(r0)); | 4668 __ mov(r7, Operand(r0)); |
| 4669 ConvertToDoubleStub stub2(r1, r0, r7, r6); | 4669 ConvertToDoubleStub stub2(r1, r0, r7, r6); |
| 4670 __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET); | 4670 __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET); |
| 4671 } | 4671 } |
| 4672 | 4672 |
| 4673 __ pop(lr); | 4673 __ pop(lr); |
| 4674 // Fall through to both_loaded_as_doubles. | 4674 // Fall through to both_loaded_as_doubles. |
| 4675 } | 4675 } |
| (...skipping 189 matching lines...) |
| 4865 EmitSmiNonsmiComparison(masm, &rhs_not_nan, &slow, strict_); | 4865 EmitSmiNonsmiComparison(masm, &rhs_not_nan, &slow, strict_); |
| 4866 | 4866 |
| 4867 __ bind(&both_loaded_as_doubles); | 4867 __ bind(&both_loaded_as_doubles); |
| 4868 // r0, r1, r2, r3 are the double representations of the left hand side | 4868 // r0, r1, r2, r3 are the double representations of the left hand side |
| 4869 // and the right hand side. | 4869 // and the right hand side. |
| 4870 | 4870 |
| 4871 // Checks for NaN in the doubles we have loaded. Can return the answer or | 4871 // Checks for NaN in the doubles we have loaded. Can return the answer or |
| 4872 // fall through if neither is a NaN. Also binds rhs_not_nan. | 4872 // fall through if neither is a NaN. Also binds rhs_not_nan. |
| 4873 EmitNanCheck(masm, &rhs_not_nan, cc_); | 4873 EmitNanCheck(masm, &rhs_not_nan, cc_); |
| 4874 | 4874 |
| 4875 if (CpuFeatures::IsSupported(CpuFeatures::VFP3)) { | 4875 if (CpuFeatures::IsSupported(VFP3)) { |
| 4876 CpuFeatures::Scope scope(CpuFeatures::VFP3); | 4876 CpuFeatures::Scope scope(VFP3); |
| 4877 // ARMv7 VFP3 instructions to implement double precision comparison. | 4877 // ARMv7 VFP3 instructions to implement double precision comparison. |
| 4878 __ fmdrr(d6, r0, r1); | 4878 __ fmdrr(d6, r0, r1); |
| 4879 __ fmdrr(d7, r2, r3); | 4879 __ fmdrr(d7, r2, r3); |
| 4880 | 4880 |
| 4881 __ fcmp(d6, d7); | 4881 __ fcmp(d6, d7); |
| 4882 __ vmrs(pc); | 4882 __ vmrs(pc); |
| 4883 __ mov(r0, Operand(0), LeaveCC, eq); | 4883 __ mov(r0, Operand(0), LeaveCC, eq); |
| 4884 __ mov(r0, Operand(1), LeaveCC, lt); | 4884 __ mov(r0, Operand(1), LeaveCC, lt); |
| 4885 __ mvn(r0, Operand(0), LeaveCC, gt); | 4885 __ mvn(r0, Operand(0), LeaveCC, gt); |
| 4886 __ mov(pc, Operand(lr)); | 4886 __ mov(pc, Operand(lr)); |
| (...skipping 95 matching lines...) |
| 4982 const Builtins::JavaScript& builtin, | 4982 const Builtins::JavaScript& builtin, |
| 4983 Token::Value operation, | 4983 Token::Value operation, |
| 4984 OverwriteMode mode) { | 4984 OverwriteMode mode) { |
| 4985 Label slow, slow_pop_2_first, do_the_call; | 4985 Label slow, slow_pop_2_first, do_the_call; |
| 4986 Label r0_is_smi, r1_is_smi, finished_loading_r0, finished_loading_r1; | 4986 Label r0_is_smi, r1_is_smi, finished_loading_r0, finished_loading_r1; |
| 4987 // Smi-smi case (overflow). | 4987 // Smi-smi case (overflow). |
| 4988 // Since both are Smis there is no heap number to overwrite, so allocate. | 4988 // Since both are Smis there is no heap number to overwrite, so allocate. |
| 4989 // The new heap number is in r5. r6 and r7 are scratch. | 4989 // The new heap number is in r5. r6 and r7 are scratch. |
| 4990 AllocateHeapNumber(masm, &slow, r5, r6, r7); | 4990 AllocateHeapNumber(masm, &slow, r5, r6, r7); |
| 4991 | 4991 |
| 4992 if (CpuFeatures::IsSupported(CpuFeatures::VFP3)) { | 4992 if (CpuFeatures::IsSupported(VFP3)) { |
| 4993 CpuFeatures::Scope scope(CpuFeatures::VFP3); | 4993 CpuFeatures::Scope scope(VFP3); |
| 4994 __ IntegerToDoubleConversionWithVFP3(r0, r3, r2); | 4994 __ IntegerToDoubleConversionWithVFP3(r0, r3, r2); |
| 4995 __ IntegerToDoubleConversionWithVFP3(r1, r1, r0); | 4995 __ IntegerToDoubleConversionWithVFP3(r1, r1, r0); |
| 4996 } else { | 4996 } else { |
| 4997 // Write Smi from r0 to r3 and r2 in double format. r6 is scratch. | 4997 // Write Smi from r0 to r3 and r2 in double format. r6 is scratch. |
| 4998 __ mov(r7, Operand(r0)); | 4998 __ mov(r7, Operand(r0)); |
| 4999 ConvertToDoubleStub stub1(r3, r2, r7, r6); | 4999 ConvertToDoubleStub stub1(r3, r2, r7, r6); |
| 5000 __ push(lr); | 5000 __ push(lr); |
| 5001 __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET); | 5001 __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET); |
| 5002 // Write Smi from r1 to r1 and r0 in double format. r6 is scratch. | 5002 // Write Smi from r1 to r1 and r0 in double format. r6 is scratch. |
| 5003 __ mov(r7, Operand(r1)); | 5003 __ mov(r7, Operand(r1)); |
| (...skipping 32 matching lines...) |
| 5036 __ ldr(r2, FieldMemOperand(r0, HeapNumber::kValueOffset)); | 5036 __ ldr(r2, FieldMemOperand(r0, HeapNumber::kValueOffset)); |
| 5037 __ ldr(r3, FieldMemOperand(r0, HeapNumber::kValueOffset + 4)); | 5037 __ ldr(r3, FieldMemOperand(r0, HeapNumber::kValueOffset + 4)); |
| 5038 __ jmp(&finished_loading_r0); | 5038 __ jmp(&finished_loading_r0); |
| 5039 __ bind(&r0_is_smi); | 5039 __ bind(&r0_is_smi); |
| 5040 if (mode == OVERWRITE_RIGHT) { | 5040 if (mode == OVERWRITE_RIGHT) { |
| 5041 // We can't overwrite a Smi so get address of new heap number into r5. | 5041 // We can't overwrite a Smi so get address of new heap number into r5. |
| 5042 AllocateHeapNumber(masm, &slow, r5, r6, r7); | 5042 AllocateHeapNumber(masm, &slow, r5, r6, r7); |
| 5043 } | 5043 } |
| 5044 | 5044 |
| 5045 | 5045 |
| 5046 if (CpuFeatures::IsSupported(CpuFeatures::VFP3)) { | 5046 if (CpuFeatures::IsSupported(VFP3)) { |
| 5047 CpuFeatures::Scope scope(CpuFeatures::VFP3); | 5047 CpuFeatures::Scope scope(VFP3); |
| 5048 __ IntegerToDoubleConversionWithVFP3(r0, r3, r2); | 5048 __ IntegerToDoubleConversionWithVFP3(r0, r3, r2); |
| 5049 } else { | 5049 } else { |
| 5050 // Write Smi from r0 to r3 and r2 in double format. | 5050 // Write Smi from r0 to r3 and r2 in double format. |
| 5051 __ mov(r7, Operand(r0)); | 5051 __ mov(r7, Operand(r0)); |
| 5052 ConvertToDoubleStub stub3(r3, r2, r7, r6); | 5052 ConvertToDoubleStub stub3(r3, r2, r7, r6); |
| 5053 __ push(lr); | 5053 __ push(lr); |
| 5054 __ Call(stub3.GetCode(), RelocInfo::CODE_TARGET); | 5054 __ Call(stub3.GetCode(), RelocInfo::CODE_TARGET); |
| 5055 __ pop(lr); | 5055 __ pop(lr); |
| 5056 } | 5056 } |
| 5057 | 5057 |
| (...skipping 10 matching lines...) |
| 5068 // Calling convention says that first double is in r0 and r1. | 5068 // Calling convention says that first double is in r0 and r1. |
| 5069 __ ldr(r0, FieldMemOperand(r1, HeapNumber::kValueOffset)); | 5069 __ ldr(r0, FieldMemOperand(r1, HeapNumber::kValueOffset)); |
| 5070 __ ldr(r1, FieldMemOperand(r1, HeapNumber::kValueOffset + 4)); | 5070 __ ldr(r1, FieldMemOperand(r1, HeapNumber::kValueOffset + 4)); |
| 5071 __ jmp(&finished_loading_r1); | 5071 __ jmp(&finished_loading_r1); |
| 5072 __ bind(&r1_is_smi); | 5072 __ bind(&r1_is_smi); |
| 5073 if (mode == OVERWRITE_LEFT) { | 5073 if (mode == OVERWRITE_LEFT) { |
| 5074 // We can't overwrite a Smi so get address of new heap number into r5. | 5074 // We can't overwrite a Smi so get address of new heap number into r5. |
| 5075 AllocateHeapNumber(masm, &slow, r5, r6, r7); | 5075 AllocateHeapNumber(masm, &slow, r5, r6, r7); |
| 5076 } | 5076 } |
| 5077 | 5077 |
| 5078 if (CpuFeatures::IsSupported(CpuFeatures::VFP3)) { | 5078 if (CpuFeatures::IsSupported(VFP3)) { |
| 5079 CpuFeatures::Scope scope(CpuFeatures::VFP3); | 5079 CpuFeatures::Scope scope(VFP3); |
| 5080 __ IntegerToDoubleConversionWithVFP3(r1, r1, r0); | 5080 __ IntegerToDoubleConversionWithVFP3(r1, r1, r0); |
| 5081 } else { | 5081 } else { |
| 5082 // Write Smi from r1 to r1 and r0 in double format. | 5082 // Write Smi from r1 to r1 and r0 in double format. |
| 5083 __ mov(r7, Operand(r1)); | 5083 __ mov(r7, Operand(r1)); |
| 5084 ConvertToDoubleStub stub4(r1, r0, r7, r6); | 5084 ConvertToDoubleStub stub4(r1, r0, r7, r6); |
| 5085 __ push(lr); | 5085 __ push(lr); |
| 5086 __ Call(stub4.GetCode(), RelocInfo::CODE_TARGET); | 5086 __ Call(stub4.GetCode(), RelocInfo::CODE_TARGET); |
| 5087 __ pop(lr); | 5087 __ pop(lr); |
| 5088 } | 5088 } |
| 5089 | 5089 |
| 5090 __ bind(&finished_loading_r1); | 5090 __ bind(&finished_loading_r1); |
| 5091 | 5091 |
| 5092 __ bind(&do_the_call); | 5092 __ bind(&do_the_call); |
| 5093 // r0: Left value (least significant part of mantissa). | 5093 // r0: Left value (least significant part of mantissa). |
| 5094 // r1: Left value (sign, exponent, top of mantissa). | 5094 // r1: Left value (sign, exponent, top of mantissa). |
| 5095 // r2: Right value (least significant part of mantissa). | 5095 // r2: Right value (least significant part of mantissa). |
| 5096 // r3: Right value (sign, exponent, top of mantissa). | 5096 // r3: Right value (sign, exponent, top of mantissa). |
| 5097 // r5: Address of heap number for result. | 5097 // r5: Address of heap number for result. |
| 5098 | 5098 |
| 5099 if (CpuFeatures::IsSupported(CpuFeatures::VFP3) && | 5099 if (CpuFeatures::IsSupported(VFP3) && |
| 5100 ((Token::MUL == operation) || | 5100 ((Token::MUL == operation) || |
| 5101 (Token::DIV == operation) || | 5101 (Token::DIV == operation) || |
| 5102 (Token::ADD == operation) || | 5102 (Token::ADD == operation) || |
| 5103 (Token::SUB == operation))) { | 5103 (Token::SUB == operation))) { |
| 5104 CpuFeatures::Scope scope(CpuFeatures::VFP3); | 5104 CpuFeatures::Scope scope(VFP3); |
| 5105 // ARMv7 VFP3 instructions to implement | 5105 // ARMv7 VFP3 instructions to implement |
| 5106 // double precision, add, subtract, multiply, divide. | 5106 // double precision, add, subtract, multiply, divide. |
| 5107 __ fmdrr(d6, r0, r1); | 5107 __ fmdrr(d6, r0, r1); |
| 5108 __ fmdrr(d7, r2, r3); | 5108 __ fmdrr(d7, r2, r3); |
| 5109 | 5109 |
| 5110 if (Token::MUL == operation) { | 5110 if (Token::MUL == operation) { |
| 5111 __ fmuld(d5, d6, d7); | 5111 __ fmuld(d5, d6, d7); |
| 5112 } else if (Token::DIV == operation) { | 5112 } else if (Token::DIV == operation) { |
| 5113 __ fdivd(d5, d6, d7); | 5113 __ fdivd(d5, d6, d7); |
| 5114 } else if (Token::ADD == operation) { | 5114 } else if (Token::ADD == operation) { |
| (...skipping 73 matching lines...) |
| 5188 __ b(gt, slow); | 5188 __ b(gt, slow); |
| 5189 | 5189 |
| 5190 // We know the exponent is smaller than 30 (biased). If it is less than | 5190 // We know the exponent is smaller than 30 (biased). If it is less than |
| 5191 // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, ie | 5191 // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, ie |
| 5192 // it rounds to zero. | 5192 // it rounds to zero. |
| 5193 const uint32_t zero_exponent = | 5193 const uint32_t zero_exponent = |
| 5194 (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift; | 5194 (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift; |
| 5195 __ sub(scratch2, scratch2, Operand(zero_exponent), SetCC); | 5195 __ sub(scratch2, scratch2, Operand(zero_exponent), SetCC); |
| 5196 // Dest already has a Smi zero. | 5196 // Dest already has a Smi zero. |
| 5197 __ b(lt, &done); | 5197 __ b(lt, &done); |
| 5198 if (!CpuFeatures::IsSupported(CpuFeatures::VFP3)) { | 5198 if (!CpuFeatures::IsSupported(VFP3)) { |
| 5199 // We have a shifted exponent between 0 and 30 in scratch2. | 5199 // We have a shifted exponent between 0 and 30 in scratch2. |
| 5200 __ mov(dest, Operand(scratch2, LSR, HeapNumber::kExponentShift)); | 5200 __ mov(dest, Operand(scratch2, LSR, HeapNumber::kExponentShift)); |
| 5201 // We now have the exponent in dest. Subtract from 30 to get | 5201 // We now have the exponent in dest. Subtract from 30 to get |
| 5202 // how much to shift down. | 5202 // how much to shift down. |
| 5203 __ rsb(dest, dest, Operand(30)); | 5203 __ rsb(dest, dest, Operand(30)); |
| 5204 } | 5204 } |
| 5205 __ bind(&right_exponent); | 5205 __ bind(&right_exponent); |
| 5206 if (CpuFeatures::IsSupported(CpuFeatures::VFP3)) { | 5206 if (CpuFeatures::IsSupported(VFP3)) { |
| 5207 CpuFeatures::Scope scope(CpuFeatures::VFP3); | 5207 CpuFeatures::Scope scope(VFP3); |
| 5208 // ARMv7 VFP3 instructions implementing double precision to integer | 5208 // ARMv7 VFP3 instructions implementing double precision to integer |
| 5209 // conversion using round to zero. | 5209 // conversion using round to zero. |
| 5210 __ ldr(scratch2, FieldMemOperand(source, HeapNumber::kMantissaOffset)); | 5210 __ ldr(scratch2, FieldMemOperand(source, HeapNumber::kMantissaOffset)); |
| 5211 __ fmdrr(d7, scratch2, scratch); | 5211 __ fmdrr(d7, scratch2, scratch); |
| 5212 __ ftosid(s15, d7); | 5212 __ ftosid(s15, d7); |
| 5213 __ fmrs(dest, s15); | 5213 __ fmrs(dest, s15); |
| 5214 } else { | 5214 } else { |
| 5215 // Get the top bits of the mantissa. | 5215 // Get the top bits of the mantissa. |
| 5216 __ and_(scratch2, scratch, Operand(HeapNumber::kMantissaMask)); | 5216 __ and_(scratch2, scratch, Operand(HeapNumber::kMantissaMask)); |
| 5217 // Put back the implicit 1. | 5217 // Put back the implicit 1. |
| (...skipping 1117 matching lines...) |
| 6335 int CompareStub::MinorKey() { | 6335 int CompareStub::MinorKey() { |
| 6336 // Encode the two parameters in a unique 16 bit value. | 6336 // Encode the two parameters in a unique 16 bit value. |
| 6337 ASSERT(static_cast<unsigned>(cc_) >> 28 < (1 << 15)); | 6337 ASSERT(static_cast<unsigned>(cc_) >> 28 < (1 << 15)); |
| 6338 return (static_cast<unsigned>(cc_) >> 27) | (strict_ ? 1 : 0); | 6338 return (static_cast<unsigned>(cc_) >> 27) | (strict_ ? 1 : 0); |
| 6339 } | 6339 } |
| 6340 | 6340 |
| 6341 | 6341 |
| 6342 #undef __ | 6342 #undef __ |
| 6343 | 6343 |
| 6344 } } // namespace v8::internal | 6344 } } // namespace v8::internal |
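The recurring change in this patch is that the feature enum is now referenced at namespace scope (`IsSupported(VFP3)`, `Scope scope(VFP3)`) instead of as a nested name (`CpuFeatures::VFP3`). Below is a minimal standalone sketch of that probe-once / branch-per-callsite pattern. It is not V8's actual `CpuFeatures` class; the enum values, `Probe` helper, and the plain-C++ arithmetic standing in for emitted VFP3 instructions are all illustrative.

```cpp
#include <cstdint>

enum CpuFeature { VFP3 = 0, ARMv7 = 1 };   // namespace-scope enum, as in the NEW column (values illustrative)

class CpuFeatures {
 public:
  static void Probe(uint64_t detected) { supported_ = detected; }
  static bool IsSupported(CpuFeature f) {
    return (supported_ & (1ull << f)) != 0;
  }
  // In the real code Scope is an assertion helper; here it is a no-op stand-in.
  class Scope {
   public:
    explicit Scope(CpuFeature f) { (void)f; }
  };

 private:
  static uint64_t supported_;
};

uint64_t CpuFeatures::supported_ = 0;

double AddWithDispatch(double lhs, double rhs) {
  if (CpuFeatures::IsSupported(VFP3)) {
    CpuFeatures::Scope scope(VFP3);
    // The code generator would emit fmdrr/faddd here; plain C++ stands in for it.
    return lhs + rhs;
  }
  // Software fallback path, analogous to ConvertToDoubleStub plus a runtime call.
  return lhs + rhs;
}
```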
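The VFP3 comparison branch (old lines 4875-4886) materializes the compare result as an integer in r0 after `fcmp d6, d7` and `vmrs`: 0 on equality, 1 on `lt`, and `mvn r0, #0` (all ones, i.e. -1) on `gt`. A hedged C++ sketch of that encoding follows; which operand ends up in d6 versus d7 decides the sign convention, so take the +1/-1 mapping as illustrative rather than authoritative. NaNs are filtered earlier by `EmitNanCheck`, so the unordered case is out of scope here.

```cpp
int MaterializeCompareResult(double d6, double d7) {
  if (d6 == d7) return 0;   // eq:  mov r0, #0
  if (d6 < d7)  return 1;   // lt:  mov r0, #1
  return -1;                // gt:  mvn r0, #0 (all ones == -1)
}
```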
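The non-VFP3 fallback in the hunk around old lines 5188-5224 truncates a double to a 32-bit integer by working on its raw IEEE-754 bits: check the exponent range, put back the implicit leading 1, then shift the mantissa down. The sketch below mirrors that logic as standalone C++ under the assumption that a 64-bit bit pattern is used directly, whereas the generated code operates on the two 32-bit words of a HeapNumber; the out-of-range case simply returns 0 here, where the real code branches to the slow path.

```cpp
#include <cstdint>
#include <cstring>

int32_t TruncateDoubleToInt32(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof bits);

  const int32_t sign = (bits >> 63) ? -1 : 1;
  const int32_t exponent =
      static_cast<int32_t>((bits >> 52) & 0x7FF) - 1023;  // unbiased

  if (exponent < 0) return 0;    // |value| < 1.0 rounds to zero ("Dest already has a Smi zero")
  if (exponent > 30) return 0;   // would not fit in 31 bits; generated code takes the slow path

  // 52 stored mantissa bits with the implicit leading 1 restored at bit 52.
  const uint64_t mantissa =
      (bits & 0x000FFFFFFFFFFFFFull) | 0x0010000000000000ull;
  return sign * static_cast<int32_t>(mantissa >> (52 - exponent));
}
```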