 Chromium Code Reviews
 Chromium Code Reviews Issue 385069:
  Fix some style issues in the ARM code....  (Closed) 
  Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
    
  
    Issue 385069:
  Fix some style issues in the ARM code....  (Closed) 
  Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/| OLD | NEW | 
|---|---|
| 1 // Copyright 2006-2009 the V8 project authors. All rights reserved. | 1 // Copyright 2006-2009 the V8 project authors. All rights reserved. | 
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without | 
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are | 
| 4 // met: | 4 // met: | 
| 5 // | 5 // | 
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright | 
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. | 
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above | 
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following | 
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided | 
| (...skipping 4581 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 4592 } | 4592 } | 
| 4593 } | 4593 } | 
| 4594 __ mov(pc, Operand(lr)); // Return. | 4594 __ mov(pc, Operand(lr)); // Return. | 
| 4595 } | 4595 } | 
| 4596 // No fall through here. | 4596 // No fall through here. | 
| 4597 | 4597 | 
| 4598 __ bind(&not_identical); | 4598 __ bind(&not_identical); | 
| 4599 } | 4599 } | 
| 4600 | 4600 | 
| 4601 | 4601 | 
| 4602 static void IntegerToDoubleConversionWithVFP3(MacroAssembler* masm, | |
| 4603 Register inReg, | |
| 4604 Register outHighReg, | |
| 4605 Register outLowReg) { | |
| 4606 // ARMv7 VFP3 instructions to implement integer to double conversion. | |
| 4607 // This VFP3 implementation is known to work | |
| 4608 // on ARMv7-VFP3 Snapdragon processor. | |
| 4609 | |
| 4610 __ mov(r7, Operand(inReg, ASR, kSmiTagSize)); | |
| 4611 __ fmsr(s15, r7); | |
| 4612 __ fsitod(d7, s15); | |
| 4613 __ fmrrd(outLowReg, outHighReg, d7); | |
| 4614 } | |
| 4615 | |
| 4616 | |
| 4617 // See comment at call site. | 4602 // See comment at call site. | 
| 4618 static void EmitSmiNonsmiComparison(MacroAssembler* masm, | 4603 static void EmitSmiNonsmiComparison(MacroAssembler* masm, | 
| 4619 Label* rhs_not_nan, | 4604 Label* rhs_not_nan, | 
| 4620 Label* slow, | 4605 Label* slow, | 
| 4621 bool strict) { | 4606 bool strict) { | 
| 4622 Label lhs_is_smi; | 4607 Label lhs_is_smi; | 
| 4623 __ tst(r0, Operand(kSmiTagMask)); | 4608 __ tst(r0, Operand(kSmiTagMask)); | 
| 4624 __ b(eq, &lhs_is_smi); | 4609 __ b(eq, &lhs_is_smi); | 
| 4625 | 4610 | 
| 4626 // Rhs is a Smi. Check whether the non-smi is a heap number. | 4611 // Rhs is a Smi. Check whether the non-smi is a heap number. | 
| 4627 __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE); | 4612 __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE); | 
| 4628 if (strict) { | 4613 if (strict) { | 
| 4629 // If lhs was not a number and rhs was a Smi then strict equality cannot | 4614 // If lhs was not a number and rhs was a Smi then strict equality cannot | 
| 4630 // succeed. Return non-equal (r0 is already not zero) | 4615 // succeed. Return non-equal (r0 is already not zero) | 
| 4631 __ mov(pc, Operand(lr), LeaveCC, ne); // Return. | 4616 __ mov(pc, Operand(lr), LeaveCC, ne); // Return. | 
| 4632 } else { | 4617 } else { | 
| 4633 // Smi compared non-strictly with a non-Smi non-heap-number. Call | 4618 // Smi compared non-strictly with a non-Smi non-heap-number. Call | 
| 4634 // the runtime. | 4619 // the runtime. | 
| 4635 __ b(ne, slow); | 4620 __ b(ne, slow); | 
| 4636 } | 4621 } | 
| 4637 | 4622 | 
| 4638 // Rhs is a smi, lhs is a number. | 4623 // Rhs is a smi, lhs is a number. | 
| 4639 __ push(lr); | 4624 __ push(lr); | 
| 4640 | 4625 | 
| 4641 if (CpuFeatures::IsSupported(CpuFeatures::VFP3)) { | 4626 if (CpuFeatures::IsSupported(CpuFeatures::VFP3)) { | 
| 4642 IntegerToDoubleConversionWithVFP3(masm, r1, r3, r2); | 4627 CpuFeatures::Scope scope(CpuFeatures::VFP3); | 
| 4628 __ IntegerToDoubleConversionWithVFP3(r1, r3, r2); | |
| 4643 } else { | 4629 } else { | 
| 4644 __ mov(r7, Operand(r1)); | 4630 __ mov(r7, Operand(r1)); | 
| 4645 ConvertToDoubleStub stub1(r3, r2, r7, r6); | 4631 ConvertToDoubleStub stub1(r3, r2, r7, r6); | 
| 4646 __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET); | 4632 __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET); | 
| 4647 } | 4633 } | 
| 4648 | 4634 | 
| 4649 | 4635 | 
| 4650 // r3 and r2 are rhs as double. | 4636 // r3 and r2 are rhs as double. | 
| 4651 __ ldr(r1, FieldMemOperand(r0, HeapNumber::kValueOffset + kPointerSize)); | 4637 __ ldr(r1, FieldMemOperand(r0, HeapNumber::kValueOffset + kPointerSize)); | 
| 4652 __ ldr(r0, FieldMemOperand(r0, HeapNumber::kValueOffset)); | 4638 __ ldr(r0, FieldMemOperand(r0, HeapNumber::kValueOffset)); | 
| (...skipping 16 matching lines...) Expand all Loading... | |
| 4669 __ b(ne, slow); | 4655 __ b(ne, slow); | 
| 4670 } | 4656 } | 
| 4671 | 4657 | 
| 4672 // Lhs is a smi, rhs is a number. | 4658 // Lhs is a smi, rhs is a number. | 
| 4673 // r0 is Smi and r1 is heap number. | 4659 // r0 is Smi and r1 is heap number. | 
| 4674 __ push(lr); | 4660 __ push(lr); | 
| 4675 __ ldr(r2, FieldMemOperand(r1, HeapNumber::kValueOffset)); | 4661 __ ldr(r2, FieldMemOperand(r1, HeapNumber::kValueOffset)); | 
| 4676 __ ldr(r3, FieldMemOperand(r1, HeapNumber::kValueOffset + kPointerSize)); | 4662 __ ldr(r3, FieldMemOperand(r1, HeapNumber::kValueOffset + kPointerSize)); | 
| 4677 | 4663 | 
| 4678 if (CpuFeatures::IsSupported(CpuFeatures::VFP3)) { | 4664 if (CpuFeatures::IsSupported(CpuFeatures::VFP3)) { | 
| 4679 IntegerToDoubleConversionWithVFP3(masm, r0, r1, r0); | 4665 CpuFeatures::Scope scope(CpuFeatures::VFP3); | 
| 4666 __ IntegerToDoubleConversionWithVFP3(r0, r1, r0); | |
| 4680 } else { | 4667 } else { | 
| 4681 __ mov(r7, Operand(r0)); | 4668 __ mov(r7, Operand(r0)); | 
| 4682 ConvertToDoubleStub stub2(r1, r0, r7, r6); | 4669 ConvertToDoubleStub stub2(r1, r0, r7, r6); | 
| 4683 __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET); | 4670 __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET); | 
| 4684 } | 4671 } | 
| 4685 | 4672 | 
| 4686 __ pop(lr); | 4673 __ pop(lr); | 
| 4687 // Fall through to both_loaded_as_doubles. | 4674 // Fall through to both_loaded_as_doubles. | 
| 4688 } | 4675 } | 
| 4689 | 4676 | 
| (...skipping 189 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 4879 | 4866 | 
| 4880 __ bind(&both_loaded_as_doubles); | 4867 __ bind(&both_loaded_as_doubles); | 
| 4881 // r0, r1, r2, r3 are the double representations of the left hand side | 4868 // r0, r1, r2, r3 are the double representations of the left hand side | 
| 4882 // and the right hand side. | 4869 // and the right hand side. | 
| 4883 | 4870 | 
| 4884 // Checks for NaN in the doubles we have loaded. Can return the answer or | 4871 // Checks for NaN in the doubles we have loaded. Can return the answer or | 
| 4885 // fall through if neither is a NaN. Also binds rhs_not_nan. | 4872 // fall through if neither is a NaN. Also binds rhs_not_nan. | 
| 4886 EmitNanCheck(masm, &rhs_not_nan, cc_); | 4873 EmitNanCheck(masm, &rhs_not_nan, cc_); | 
| 4887 | 4874 | 
| 4888 if (CpuFeatures::IsSupported(CpuFeatures::VFP3)) { | 4875 if (CpuFeatures::IsSupported(CpuFeatures::VFP3)) { | 
| 4876 CpuFeatures::Scope scope(CpuFeatures::VFP3); | |
| 4889 // ARMv7 VFP3 instructions to implement double precision comparison. | 4877 // ARMv7 VFP3 instructions to implement double precision comparison. | 
| 4890 // This VFP3 implementation is known to work on | |
| 4891 // ARMv7-VFP3 Snapdragon processor. | |
| 4892 | |
| 4893 __ fmdrr(d6, r0, r1); | 4878 __ fmdrr(d6, r0, r1); | 
| 4894 __ fmdrr(d7, r2, r3); | 4879 __ fmdrr(d7, r2, r3); | 
| 4895 | 4880 | 
| 4896 __ fcmp(d6, d7); | 4881 __ fcmp(d6, d7); | 
| 4897 __ vmrs(pc); | 4882 __ vmrs(pc); | 
| 4898 __ mov(r0, Operand(0), LeaveCC, eq); | 4883 __ mov(r0, Operand(0), LeaveCC, eq); | 
| 4899 __ mov(r0, Operand(1), LeaveCC, lt); | 4884 __ mov(r0, Operand(1), LeaveCC, lt); | 
| 4900 __ mvn(r0, Operand(0), LeaveCC, gt); | 4885 __ mvn(r0, Operand(0), LeaveCC, gt); | 
| 4901 __ mov(pc, Operand(lr)); | 4886 __ mov(pc, Operand(lr)); | 
| 4902 } else { | 4887 } else { | 
| (...skipping 95 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 4998 Token::Value operation, | 4983 Token::Value operation, | 
| 4999 OverwriteMode mode) { | 4984 OverwriteMode mode) { | 
| 5000 Label slow, slow_pop_2_first, do_the_call; | 4985 Label slow, slow_pop_2_first, do_the_call; | 
| 5001 Label r0_is_smi, r1_is_smi, finished_loading_r0, finished_loading_r1; | 4986 Label r0_is_smi, r1_is_smi, finished_loading_r0, finished_loading_r1; | 
| 5002 // Smi-smi case (overflow). | 4987 // Smi-smi case (overflow). | 
| 5003 // Since both are Smis there is no heap number to overwrite, so allocate. | 4988 // Since both are Smis there is no heap number to overwrite, so allocate. | 
| 5004 // The new heap number is in r5. r6 and r7 are scratch. | 4989 // The new heap number is in r5. r6 and r7 are scratch. | 
| 5005 AllocateHeapNumber(masm, &slow, r5, r6, r7); | 4990 AllocateHeapNumber(masm, &slow, r5, r6, r7); | 
| 5006 | 4991 | 
| 5007 if (CpuFeatures::IsSupported(CpuFeatures::VFP3)) { | 4992 if (CpuFeatures::IsSupported(CpuFeatures::VFP3)) { | 
| 5008 IntegerToDoubleConversionWithVFP3(masm, r0, r3, r2); | 4993 CpuFeatures::Scope scope(CpuFeatures::VFP3); | 
| 5009 IntegerToDoubleConversionWithVFP3(masm, r1, r1, r0); | 4994 __ IntegerToDoubleConversionWithVFP3(r0, r3, r2); | 
| 4995 __ IntegerToDoubleConversionWithVFP3(r1, r1, r0); | |
| 5010 } else { | 4996 } else { | 
| 5011 // Write Smi from r0 to r3 and r2 in double format. r6 is scratch. | 4997 // Write Smi from r0 to r3 and r2 in double format. r6 is scratch. | 
| 5012 __ mov(r7, Operand(r0)); | 4998 __ mov(r7, Operand(r0)); | 
| 5013 ConvertToDoubleStub stub1(r3, r2, r7, r6); | 4999 ConvertToDoubleStub stub1(r3, r2, r7, r6); | 
| 5014 __ push(lr); | 5000 __ push(lr); | 
| 5015 __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET); | 5001 __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET); | 
| 5016 // Write Smi from r1 to r1 and r0 in double format. r6 is scratch. | 5002 // Write Smi from r1 to r1 and r0 in double format. r6 is scratch. | 
| 5017 __ mov(r7, Operand(r1)); | 5003 __ mov(r7, Operand(r1)); | 
| 5018 ConvertToDoubleStub stub2(r1, r0, r7, r6); | 5004 ConvertToDoubleStub stub2(r1, r0, r7, r6); | 
| 5019 __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET); | 5005 __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET); | 
| (...skipping 31 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 5051 __ ldr(r3, FieldMemOperand(r0, HeapNumber::kValueOffset + 4)); | 5037 __ ldr(r3, FieldMemOperand(r0, HeapNumber::kValueOffset + 4)); | 
| 5052 __ jmp(&finished_loading_r0); | 5038 __ jmp(&finished_loading_r0); | 
| 5053 __ bind(&r0_is_smi); | 5039 __ bind(&r0_is_smi); | 
| 5054 if (mode == OVERWRITE_RIGHT) { | 5040 if (mode == OVERWRITE_RIGHT) { | 
| 5055 // We can't overwrite a Smi so get address of new heap number into r5. | 5041 // We can't overwrite a Smi so get address of new heap number into r5. | 
| 5056 AllocateHeapNumber(masm, &slow, r5, r6, r7); | 5042 AllocateHeapNumber(masm, &slow, r5, r6, r7); | 
| 5057 } | 5043 } | 
| 5058 | 5044 | 
| 5059 | 5045 | 
| 5060 if (CpuFeatures::IsSupported(CpuFeatures::VFP3)) { | 5046 if (CpuFeatures::IsSupported(CpuFeatures::VFP3)) { | 
| 5061 IntegerToDoubleConversionWithVFP3(masm, r0, r3, r2); | 5047 CpuFeatures::Scope scope(CpuFeatures::VFP3); | 
| 5048 __ IntegerToDoubleConversionWithVFP3(r0, r3, r2); | |
| 5062 } else { | 5049 } else { | 
| 5063 // Write Smi from r0 to r3 and r2 in double format. | 5050 // Write Smi from r0 to r3 and r2 in double format. | 
| 5064 __ mov(r7, Operand(r0)); | 5051 __ mov(r7, Operand(r0)); | 
| 5065 ConvertToDoubleStub stub3(r3, r2, r7, r6); | 5052 ConvertToDoubleStub stub3(r3, r2, r7, r6); | 
| 5066 __ push(lr); | 5053 __ push(lr); | 
| 5067 __ Call(stub3.GetCode(), RelocInfo::CODE_TARGET); | 5054 __ Call(stub3.GetCode(), RelocInfo::CODE_TARGET); | 
| 5068 __ pop(lr); | 5055 __ pop(lr); | 
| 5069 } | 5056 } | 
| 5070 | 5057 | 
| 5071 __ bind(&finished_loading_r0); | 5058 __ bind(&finished_loading_r0); | 
| (...skipping 10 matching lines...) Expand all Loading... | |
| 5082 __ ldr(r0, FieldMemOperand(r1, HeapNumber::kValueOffset)); | 5069 __ ldr(r0, FieldMemOperand(r1, HeapNumber::kValueOffset)); | 
| 5083 __ ldr(r1, FieldMemOperand(r1, HeapNumber::kValueOffset + 4)); | 5070 __ ldr(r1, FieldMemOperand(r1, HeapNumber::kValueOffset + 4)); | 
| 5084 __ jmp(&finished_loading_r1); | 5071 __ jmp(&finished_loading_r1); | 
| 5085 __ bind(&r1_is_smi); | 5072 __ bind(&r1_is_smi); | 
| 5086 if (mode == OVERWRITE_LEFT) { | 5073 if (mode == OVERWRITE_LEFT) { | 
| 5087 // We can't overwrite a Smi so get address of new heap number into r5. | 5074 // We can't overwrite a Smi so get address of new heap number into r5. | 
| 5088 AllocateHeapNumber(masm, &slow, r5, r6, r7); | 5075 AllocateHeapNumber(masm, &slow, r5, r6, r7); | 
| 5089 } | 5076 } | 
| 5090 | 5077 | 
| 5091 if (CpuFeatures::IsSupported(CpuFeatures::VFP3)) { | 5078 if (CpuFeatures::IsSupported(CpuFeatures::VFP3)) { | 
| 5092 IntegerToDoubleConversionWithVFP3(masm, r1, r1, r0); | 5079 CpuFeatures::Scope scope(CpuFeatures::VFP3); | 
| 5080 __ IntegerToDoubleConversionWithVFP3(r1, r1, r0); | |
| 5093 } else { | 5081 } else { | 
| 5094 // Write Smi from r1 to r1 and r0 in double format. | 5082 // Write Smi from r1 to r1 and r0 in double format. | 
| 5095 __ mov(r7, Operand(r1)); | 5083 __ mov(r7, Operand(r1)); | 
| 5096 ConvertToDoubleStub stub4(r1, r0, r7, r6); | 5084 ConvertToDoubleStub stub4(r1, r0, r7, r6); | 
| 5097 __ push(lr); | 5085 __ push(lr); | 
| 5098 __ Call(stub4.GetCode(), RelocInfo::CODE_TARGET); | 5086 __ Call(stub4.GetCode(), RelocInfo::CODE_TARGET); | 
| 5099 __ pop(lr); | 5087 __ pop(lr); | 
| 5100 } | 5088 } | 
| 5101 | 5089 | 
| 5102 __ bind(&finished_loading_r1); | 5090 __ bind(&finished_loading_r1); | 
| 5103 | 5091 | 
| 5104 __ bind(&do_the_call); | 5092 __ bind(&do_the_call); | 
| 5105 // r0: Left value (least significant part of mantissa). | 5093 // r0: Left value (least significant part of mantissa). | 
| 5106 // r1: Left value (sign, exponent, top of mantissa). | 5094 // r1: Left value (sign, exponent, top of mantissa). | 
| 5107 // r2: Right value (least significant part of mantissa). | 5095 // r2: Right value (least significant part of mantissa). | 
| 5108 // r3: Right value (sign, exponent, top of mantissa). | 5096 // r3: Right value (sign, exponent, top of mantissa). | 
| 5109 // r5: Address of heap number for result. | 5097 // r5: Address of heap number for result. | 
| 5110 | 5098 | 
| 5111 if (CpuFeatures::IsSupported(CpuFeatures::VFP3) && | 5099 if (CpuFeatures::IsSupported(CpuFeatures::VFP3) && | 
| 5112 ((Token::MUL == operation) || | 5100 ((Token::MUL == operation) || | 
| 5113 (Token::DIV == operation) || | 5101 (Token::DIV == operation) || | 
| 5114 (Token::ADD == operation) || | 5102 (Token::ADD == operation) || | 
| 5115 (Token::SUB == operation))) { | 5103 (Token::SUB == operation))) { | 
| 5116 // ARMv7 VFP3 instructions to implement | 5104 CpuFeatures::Scope scope(CpuFeatures::VFP3); | 
| 5117 // double precision, add, subtract, multiply, divide. | 5105 // ARMv7 VFP3 instructions to implement | 
| 5118 // This VFP3 implementation is known to work on | 5106 // double precision, add, subtract, multiply, divide. | 
| 5119 // ARMv7-VFP3 Snapdragon processor | 5107 __ fmdrr(d6, r0, r1); | 
| 5108 __ fmdrr(d7, r2, r3); | |
| 5120 | 5109 | 
| 5121 __ fmdrr(d6, r0, r1); | 5110 if (Token::MUL == operation) __ fmuld(d5, d6, d7); | 
| 
Mads Ager (chromium)
2009/11/12 13:25:24
Please remove extra spacing.
 
Erik Corry
2009/11/12 13:42:37
I also added {}
 | |
| 5122 __ fmdrr(d7, r2, r3); | 5111 else if (Token::DIV == operation) __ fdivd(d5, d6, d7); | 
| 5112 else if (Token::ADD == operation) __ faddd(d5, d6, d7); | |
| 5113 else if (Token::SUB == operation) __ fsubd(d5, d6, d7); | |
| 5123 | 5114 | 
| 5124 if (Token::MUL == operation) __ fmuld(d5, d6, d7); | 5115 __ fmrrd(r0, r1, d5); | 
| 5125 else if (Token::DIV == operation) __ fdivd(d5, d6, d7); | |
| 5126 else if (Token::ADD == operation) __ faddd(d5, d6, d7); | |
| 5127 else if (Token::SUB == operation) __ fsubd(d5, d6, d7); | |
| 5128 | 5116 | 
| 5129 __ fmrrd(r0, r1, d5); | 5117 __ str(r0, FieldMemOperand(r5, HeapNumber::kValueOffset)); | 
| 5130 | 5118 __ str(r1, FieldMemOperand(r5, HeapNumber::kValueOffset + 4)); | 
| 5131 __ str(r0, FieldMemOperand(r5, HeapNumber::kValueOffset)); | 5119 __ mov(r0, Operand(r5)); | 
| 5132 __ str(r1, FieldMemOperand(r5, HeapNumber::kValueOffset + 4)); | 5120 __ mov(pc, lr); | 
| 5133 __ mov(r0, Operand(r5)); | 5121 return; | 
| 5134 __ mov(pc, lr); | |
| 5135 return; | |
| 5136 } | 5122 } | 
| 5137 __ push(lr); // For later. | 5123 __ push(lr); // For later. | 
| 5138 __ push(r5); // Address of heap number that is answer. | 5124 __ push(r5); // Address of heap number that is answer. | 
| 5139 __ AlignStack(0); | 5125 __ AlignStack(0); | 
| 5140 // Call C routine that may not cause GC or other trouble. | 5126 // Call C routine that may not cause GC or other trouble. | 
| 5141 __ mov(r5, Operand(ExternalReference::double_fp_operation(operation))); | 5127 __ mov(r5, Operand(ExternalReference::double_fp_operation(operation))); | 
| 5142 __ Call(r5); | 5128 __ Call(r5); | 
| 5143 __ pop(r4); // Address of heap number. | 5129 __ pop(r4); // Address of heap number. | 
| 5144 __ cmp(r4, Operand(Smi::FromInt(0))); | 5130 __ cmp(r4, Operand(Smi::FromInt(0))); | 
| 5145 __ pop(r4, eq); // Conditional pop instruction to get rid of alignment push. | 5131 __ pop(r4, eq); // Conditional pop instruction to get rid of alignment push. | 
| (...skipping 58 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 5204 __ b(lt, &done); | 5190 __ b(lt, &done); | 
| 5205 if (!CpuFeatures::IsSupported(CpuFeatures::VFP3)) { | 5191 if (!CpuFeatures::IsSupported(CpuFeatures::VFP3)) { | 
| 5206 // We have a shifted exponent between 0 and 30 in scratch2. | 5192 // We have a shifted exponent between 0 and 30 in scratch2. | 
| 5207 __ mov(dest, Operand(scratch2, LSR, HeapNumber::kExponentShift)); | 5193 __ mov(dest, Operand(scratch2, LSR, HeapNumber::kExponentShift)); | 
| 5208 // We now have the exponent in dest. Subtract from 30 to get | 5194 // We now have the exponent in dest. Subtract from 30 to get | 
| 5209 // how much to shift down. | 5195 // how much to shift down. | 
| 5210 __ rsb(dest, dest, Operand(30)); | 5196 __ rsb(dest, dest, Operand(30)); | 
| 5211 } | 5197 } | 
| 5212 __ bind(&right_exponent); | 5198 __ bind(&right_exponent); | 
| 5213 if (CpuFeatures::IsSupported(CpuFeatures::VFP3)) { | 5199 if (CpuFeatures::IsSupported(CpuFeatures::VFP3)) { | 
| 5200 CpuFeatures::Scope scope(CpuFeatures::VFP3); | |
| 5214 // ARMv7 VFP3 instructions implementing double precision to integer | 5201 // ARMv7 VFP3 instructions implementing double precision to integer | 
| 5215 // conversion using round to zero. | 5202 // conversion using round to zero. | 
| 5216 // This VFP3 implementation is known to work on | |
| 5217 // ARMv7-VFP3 Snapdragon processor. | |
| 5218 __ ldr(scratch2, FieldMemOperand(source, HeapNumber::kMantissaOffset)); | 5203 __ ldr(scratch2, FieldMemOperand(source, HeapNumber::kMantissaOffset)); | 
| 5219 __ fmdrr(d7, scratch2, scratch); | 5204 __ fmdrr(d7, scratch2, scratch); | 
| 5220 __ ftosid(s15, d7); | 5205 __ ftosid(s15, d7); | 
| 5221 __ fmrs(dest, s15); | 5206 __ fmrs(dest, s15); | 
| 5222 } else { | 5207 } else { | 
| 5223 // Get the top bits of the mantissa. | 5208 // Get the top bits of the mantissa. | 
| 5224 __ and_(scratch2, scratch, Operand(HeapNumber::kMantissaMask)); | 5209 __ and_(scratch2, scratch, Operand(HeapNumber::kMantissaMask)); | 
| 5225 // Put back the implicit 1. | 5210 // Put back the implicit 1. | 
| 5226 __ orr(scratch2, scratch2, Operand(1 << HeapNumber::kExponentShift)); | 5211 __ orr(scratch2, scratch2, Operand(1 << HeapNumber::kExponentShift)); | 
| 5227 // Shift up the mantissa bits to take up the space the exponent used to | 5212 // Shift up the mantissa bits to take up the space the exponent used to | 
| 5228 // take. We just orred in the implicit bit so that took care of one and | 5213 // take. We just orred in the implicit bit so that took care of one and | 
| 5229 // we want to leave the sign bit 0 so we subtract 2 bits from the shift | 5214 // we want to leave the sign bit 0 so we subtract 2 bits from the shift | 
| 5230 // distance. | 5215 // distance. | 
| 5231 const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2; | 5216 const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2; | 
| 5232 __ mov(scratch2, Operand(scratch2, LSL, shift_distance)); | 5217 __ mov(scratch2, Operand(scratch2, LSL, shift_distance)); | 
| 5233 // Put sign in zero flag. | 5218 // Put sign in zero flag. | 
| 5234 __ tst(scratch, Operand(HeapNumber::kSignMask)); | 5219 __ tst(scratch, Operand(HeapNumber::kSignMask)); | 
| 5235 // Get the second half of the double. For some exponents we don't | 5220 // Get the second half of the double. For some exponents we don't | 
| 5236 // actually need this because the bits get shifted out again, but | 5221 // actually need this because the bits get shifted out again, but | 
| 5237 // it's probably slower to test than just to do it. | 5222 // it's probably slower to test than just to do it. | 
| 5238 __ ldr(scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset)); | 5223 __ ldr(scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset)); | 
| 5239 // Shift down 22 bits to get the last 10 bits. | 5224 // Shift down 22 bits to get the last 10 bits. | 
| 5240 __ orr(scratch, scratch2, Operand(scratch, LSR, 32 - shift_distance)); | 5225 __ orr(scratch, scratch2, Operand(scratch, LSR, 32 - shift_distance)); | 
| (...skipping 1102 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 6343 int CompareStub::MinorKey() { | 6328 int CompareStub::MinorKey() { | 
| 6344 // Encode the two parameters in a unique 16 bit value. | 6329 // Encode the two parameters in a unique 16 bit value. | 
| 6345 ASSERT(static_cast<unsigned>(cc_) >> 28 < (1 << 15)); | 6330 ASSERT(static_cast<unsigned>(cc_) >> 28 < (1 << 15)); | 
| 6346 return (static_cast<unsigned>(cc_) >> 27) | (strict_ ? 1 : 0); | 6331 return (static_cast<unsigned>(cc_) >> 27) | (strict_ ? 1 : 0); | 
| 6347 } | 6332 } | 
| 6348 | 6333 | 
| 6349 | 6334 | 
| 6350 #undef __ | 6335 #undef __ | 
| 6351 | 6336 | 
| 6352 } } // namespace v8::internal | 6337 } } // namespace v8::internal | 
| OLD | NEW |