Chromium Code Reviews

Side by Side Diff: src/arm/codegen-arm.cc

Issue 556020: Move heap numbers directly to VFP3 registers in comparison stub. (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: '' Created 10 years, 11 months ago
1 // Copyright 2006-2009 the V8 project authors. All rights reserved. 1 // Copyright 2006-2009 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 4829 matching lines...)
4840 4840
4841 __ bind(&not_identical); 4841 __ bind(&not_identical);
4842 } 4842 }
4843 4843
4844 4844
4845 // See comment at call site. 4845 // See comment at call site.
4846 static void EmitSmiNonsmiComparison(MacroAssembler* masm, 4846 static void EmitSmiNonsmiComparison(MacroAssembler* masm,
4847 Label* lhs_not_nan, 4847 Label* lhs_not_nan,
4848 Label* slow, 4848 Label* slow,
4849 bool strict) { 4849 bool strict) {
4850 Label lhs_is_smi; 4850 Label rhs_is_smi;
4851 __ tst(r0, Operand(kSmiTagMask)); 4851 __ tst(r0, Operand(kSmiTagMask));
4852 __ b(eq, &lhs_is_smi); 4852 __ b(eq, &rhs_is_smi);
4853 4853
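A note on the tst/b(eq) pair above: it works because of V8's 32-bit smi tagging, where a smi is a 31-bit integer shifted left one bit with a zero tag bit, while heap object pointers carry a one in that bit. A minimal standalone sketch, assuming the 32-bit constants kSmiTag == 0 and kSmiTagSize == 1:

    #include <cstdint>

    constexpr uint32_t kSmiTagMask = 1;  // low bit: 0 for smi, 1 for heap object
    constexpr int kSmiTagSize = 1;

    bool IsSmi(uint32_t value) {
      return (value & kSmiTagMask) == 0;  // what "tst rN, #kSmiTagMask; b eq" tests
    }

    int32_t SmiToInt(uint32_t value) {
      return static_cast<int32_t>(value) >> kSmiTagSize;  // the ASR untag
    }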
4854 // Rhs is a Smi. Check whether the non-smi is a heap number. 4854 // Lhs is a Smi. Check whether the rhs is a heap number.
4855 __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE); 4855 __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
4856 if (strict) { 4856 if (strict) {
4857 // If lhs was not a number and rhs was a Smi then strict equality cannot 4857 // If rhs is not a number and lhs is a Smi then strict equality cannot
4858 // succeed. Return non-equal (r0 is already not zero) 4858 // succeed. Return non-equal (r0 is already not zero)
4859 __ mov(pc, Operand(lr), LeaveCC, ne); // Return. 4859 __ mov(pc, Operand(lr), LeaveCC, ne); // Return.
4860 } else { 4860 } else {
4861 // Smi compared non-strictly with a non-Smi non-heap-number. Call 4861 // Smi compared non-strictly with a non-Smi non-heap-number. Call
4862 // the runtime. 4862 // the runtime.
4863 __ b(ne, slow); 4863 __ b(ne, slow);
4864 } 4864 }
4865 4865
4866 // Rhs is a smi, lhs is a number. 4866 // Lhs (r1) is a smi, rhs (r0) is a number.
4867 __ push(lr);
4868
4869 if (CpuFeatures::IsSupported(VFP3)) { 4867 if (CpuFeatures::IsSupported(VFP3)) {
4868 // Convert lhs to a double in d7.
4870 CpuFeatures::Scope scope(VFP3); 4869 CpuFeatures::Scope scope(VFP3);
4871 __ IntegerToDoubleConversionWithVFP3(r1, r3, r2); 4870 __ mov(r7, Operand(r1, ASR, kSmiTagSize));
4871 __ vmov(s15, r7);
4872 __ vcvt(d7, s15);
4873 // Load the double from rhs, tagged HeapNumber r0, to d6.
4874 __ sub(r7, r0, Operand(kHeapObjectTag));
4875 __ vldr(d6, r7, HeapNumber::kValueOffset);
4872 } else { 4876 } else {
4877 __ push(lr);
4878 // Convert lhs to a double in r2, r3.
4873 __ mov(r7, Operand(r1)); 4879 __ mov(r7, Operand(r1));
4874 ConvertToDoubleStub stub1(r3, r2, r7, r6); 4880 ConvertToDoubleStub stub1(r3, r2, r7, r6);
4875 __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET); 4881 __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
4882 // Load rhs to a double in r0, r1.
4883 __ ldr(r1, FieldMemOperand(r0, HeapNumber::kValueOffset + kPointerSize));
4884 __ ldr(r0, FieldMemOperand(r0, HeapNumber::kValueOffset));
4885 __ pop(lr);
4876 } 4886 }
4877 4887
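The VFP3 path above relies on two details worth spelling out. A tagged HeapNumber pointer is the real address plus kHeapObjectTag (1), so the stub subtracts the tag before vldr; and untagging a smi is a single arithmetic shift before the vmov/vcvt pair. A rough C++ equivalent, with kValueOffset as an illustrative stand-in for HeapNumber::kValueOffset:

    #include <cstdint>
    #include <cstring>

    constexpr uintptr_t kHeapObjectTag = 1;
    constexpr uintptr_t kValueOffset = 4;  // illustrative stand-in

    double LoadHeapNumberValue(uintptr_t tagged_ptr) {
      // sub r7, rN, #kHeapObjectTag; vldr dM, [r7, #HeapNumber::kValueOffset]
      const char* base = reinterpret_cast<const char*>(tagged_ptr - kHeapObjectTag);
      double value;
      std::memcpy(&value, base + kValueOffset, sizeof(value));
      return value;
    }

    double SmiToDouble(uint32_t tagged_smi) {
      // mov r7, rN, ASR #kSmiTagSize; vmov s15, r7; vcvt.f64.s32 dM, s15
      return static_cast<double>(static_cast<int32_t>(tagged_smi) >> 1);
    }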
4878
4879 // r3 and r2 are rhs as double.
4880 __ ldr(r1, FieldMemOperand(r0, HeapNumber::kValueOffset + kPointerSize));
4881 __ ldr(r0, FieldMemOperand(r0, HeapNumber::kValueOffset));
4882 // We now have both loaded as doubles but we can skip the lhs nan check 4888 // We now have both loaded as doubles but we can skip the lhs nan check
4883 // since it's a Smi. 4889 // since it's a smi.
4884 __ pop(lr);
4885 __ jmp(lhs_not_nan); 4890 __ jmp(lhs_not_nan);
4886 4891
4887 __ bind(&lhs_is_smi); 4892 __ bind(&rhs_is_smi);
4888 // Lhs is a Smi. Check whether the non-smi is a heap number. 4893 // Rhs is a smi. Check whether the non-smi lhs is a heap number.
4889 __ CompareObjectType(r1, r4, r4, HEAP_NUMBER_TYPE); 4894 __ CompareObjectType(r1, r4, r4, HEAP_NUMBER_TYPE);
4890 if (strict) { 4895 if (strict) {
4891 // If lhs was not a number and rhs was a Smi then strict equality cannot 4896 // If lhs is not a number and rhs is a smi then strict equality cannot
4892 // succeed. Return non-equal. 4897 // succeed. Return non-equal.
4893 __ mov(r0, Operand(1), LeaveCC, ne); // Non-zero indicates not equal. 4898 __ mov(r0, Operand(1), LeaveCC, ne); // Non-zero indicates not equal.
4894 __ mov(pc, Operand(lr), LeaveCC, ne); // Return. 4899 __ mov(pc, Operand(lr), LeaveCC, ne); // Return.
4895 } else { 4900 } else {
4896 // Smi compared non-strictly with a non-Smi non-heap-number. Call 4901 // Smi compared non-strictly with a non-smi non-heap-number. Call
4897 // the runtime. 4902 // the runtime.
4898 __ b(ne, slow); 4903 __ b(ne, slow);
4899 } 4904 }
4900 4905
4901 // Lhs is a smi, rhs is a number. 4906 // Rhs (r0) is a smi, lhs (r1) is a heap number.
4902 // r0 is Smi and r1 is heap number.
4903 __ push(lr);
4904 __ ldr(r2, FieldMemOperand(r1, HeapNumber::kValueOffset));
4905 __ ldr(r3, FieldMemOperand(r1, HeapNumber::kValueOffset + kPointerSize));
4906
4907 if (CpuFeatures::IsSupported(VFP3)) { 4907 if (CpuFeatures::IsSupported(VFP3)) {
4908 // Convert rhs to a double in d6.
4908 CpuFeatures::Scope scope(VFP3); 4909 CpuFeatures::Scope scope(VFP3);
4909 __ IntegerToDoubleConversionWithVFP3(r0, r1, r0); 4910 // Load the double from lhs, tagged HeapNumber r1, to d7.
4911 __ sub(r7, r1, Operand(kHeapObjectTag));
4912 __ vldr(d7, r7, HeapNumber::kValueOffset);
4913 __ mov(r7, Operand(r0, ASR, kSmiTagSize));
4914 __ vmov(s13, r7);
4915 __ vcvt(d6, s13);
4910 } else { 4916 } else {
4917 __ push(lr);
4918 // Load lhs to a double in r2, r3.
4919 __ ldr(r3, FieldMemOperand(r1, HeapNumber::kValueOffset + kPointerSize));
4920 __ ldr(r2, FieldMemOperand(r1, HeapNumber::kValueOffset));
4921 // Convert rhs to a double in r0, r1.
4911 __ mov(r7, Operand(r0)); 4922 __ mov(r7, Operand(r0));
4912 ConvertToDoubleStub stub2(r1, r0, r7, r6); 4923 ConvertToDoubleStub stub2(r1, r0, r7, r6);
4913 __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET); 4924 __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
4925 __ pop(lr);
4914 } 4926 }
4915
4916 __ pop(lr);
4917 // Fall through to both_loaded_as_doubles. 4927 // Fall through to both_loaded_as_doubles.
4918 } 4928 }
4919 4929
4920 4930
4921 void EmitNanCheck(MacroAssembler* masm, Label* lhs_not_nan, Condition cc) { 4931 void EmitNanCheck(MacroAssembler* masm, Label* lhs_not_nan, Condition cc) {
4922 bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset); 4932 bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
4923 Register rhs_exponent = exp_first ? r0 : r1; 4933 Register rhs_exponent = exp_first ? r0 : r1;
4924 Register lhs_exponent = exp_first ? r2 : r3; 4934 Register lhs_exponent = exp_first ? r2 : r3;
4925 Register rhs_mantissa = exp_first ? r1 : r0; 4935 Register rhs_mantissa = exp_first ? r1 : r0;
4926 Register lhs_mantissa = exp_first ? r3 : r2; 4936 Register lhs_mantissa = exp_first ? r3 : r2;
(...skipping 128 matching lines...)
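For reference, the skipped body of EmitNanCheck applies the standard IEEE 754 test to those exponent/mantissa word pairs: a double is NaN exactly when its 11 exponent bits are all ones and its 52-bit mantissa is nonzero. A sketch:

    #include <cstdint>

    bool IsNaNWords(uint32_t exponent_word, uint32_t mantissa_word) {
      const uint32_t exp = (exponent_word >> 20) & 0x7FF;  // 11 exponent bits
      const uint64_t mantissa =
          (static_cast<uint64_t>(exponent_word & 0xFFFFF) << 32) | mantissa_word;
      return exp == 0x7FF && mantissa != 0;
    }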
5055 Label* not_heap_numbers, 5065 Label* not_heap_numbers,
5056 Label* slow) { 5066 Label* slow) {
5057 __ CompareObjectType(r0, r3, r2, HEAP_NUMBER_TYPE); 5067 __ CompareObjectType(r0, r3, r2, HEAP_NUMBER_TYPE);
5058 __ b(ne, not_heap_numbers); 5068 __ b(ne, not_heap_numbers);
5059 __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset)); 5069 __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
5060 __ cmp(r2, r3); 5070 __ cmp(r2, r3);
5061 __ b(ne, slow); // First was a heap number, second wasn't. Go slow case. 5071 __ b(ne, slow); // First was a heap number, second wasn't. Go slow case.
5062 5072
5063 // Both are heap numbers. Load them up then jump to the code we have 5073 // Both are heap numbers. Load them up then jump to the code we have
5064 // for that. 5074 // for that.
5065 __ ldr(r2, FieldMemOperand(r1, HeapNumber::kValueOffset)); 5075 if (CpuFeatures::IsSupported(VFP3)) {
5066 __ ldr(r3, FieldMemOperand(r1, HeapNumber::kValueOffset + kPointerSize)); 5076 CpuFeatures::Scope scope(VFP3);
5067 __ ldr(r1, FieldMemOperand(r0, HeapNumber::kValueOffset + kPointerSize)); 5077 __ sub(r7, r0, Operand(kHeapObjectTag));
5068 __ ldr(r0, FieldMemOperand(r0, HeapNumber::kValueOffset)); 5078 __ vldr(d6, r7, HeapNumber::kValueOffset);
5079 __ sub(r7, r1, Operand(kHeapObjectTag));
5080 __ vldr(d7, r7, HeapNumber::kValueOffset);
5081 } else {
5082 __ ldr(r2, FieldMemOperand(r1, HeapNumber::kValueOffset));
5083 __ ldr(r3, FieldMemOperand(r1, HeapNumber::kValueOffset + kPointerSize));
5084 __ ldr(r1, FieldMemOperand(r0, HeapNumber::kValueOffset + kPointerSize));
5085 __ ldr(r0, FieldMemOperand(r0, HeapNumber::kValueOffset));
5086 }
5069 __ jmp(both_loaded_as_doubles); 5087 __ jmp(both_loaded_as_doubles);
5070 } 5088 }
5071 5089
5072 5090
5073 // Fast negative check for symbol-to-symbol equality. 5091 // Fast negative check for symbol-to-symbol equality.
5074 static void EmitCheckForSymbols(MacroAssembler* masm, Label* slow) { 5092 static void EmitCheckForSymbols(MacroAssembler* masm, Label* slow) {
5075 // r2 is object type of r0. 5093 // r2 is object type of r0.
5076 // Ensure that no non-strings have the symbol bit set. 5094 // Ensure that no non-strings have the symbol bit set.
5077 ASSERT(kNotStringTag + kIsSymbolMask > LAST_TYPE); 5095 ASSERT(kNotStringTag + kIsSymbolMask > LAST_TYPE);
5078 ASSERT(kSymbolTag != 0); 5096 ASSERT(kSymbolTag != 0);
(...skipping 31 matching lines...)
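The two ASSERTs above are what make the skipped fast path sound: if no non-string instance type can carry kIsSymbolMask, then AND-ing the two instance types and testing the symbol bit proves both operands are symbols in one step. A sketch with an illustrative mask value:

    #include <cstdint>

    constexpr uint32_t kIsSymbolMask = 0x40;  // illustrative value

    bool BothSymbols(uint32_t instance_type_a, uint32_t instance_type_b) {
      // Sound only because non-strings are guaranteed never to set this bit.
      return (instance_type_a & instance_type_b & kIsSymbolMask) != 0;
    }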
5110 ASSERT_EQ(0, Smi::FromInt(0)); 5128 ASSERT_EQ(0, Smi::FromInt(0));
5111 __ and_(r2, r0, Operand(r1)); 5129 __ and_(r2, r0, Operand(r1));
5112 __ tst(r2, Operand(kSmiTagMask)); 5130 __ tst(r2, Operand(kSmiTagMask));
5113 __ b(ne, &not_smis); 5131 __ b(ne, &not_smis);
5114 // One operand is a smi. EmitSmiNonsmiComparison generates code that can: 5132 // One operand is a smi. EmitSmiNonsmiComparison generates code that can:
5115 // 1) Return the answer. 5133 // 1) Return the answer.
5116 // 2) Go to slow. 5134 // 2) Go to slow.
5117 // 3) Fall through to both_loaded_as_doubles. 5135 // 3) Fall through to both_loaded_as_doubles.
5118 // 4) Jump to lhs_not_nan. 5136 // 4) Jump to lhs_not_nan.
5119 // In cases 3 and 4 we have found out we were dealing with a number-number 5137 // In cases 3 and 4 we have found out we were dealing with a number-number
5120 // comparison and the numbers have been loaded into r0, r1, r2, r3 as doubles. 5138 // comparison. If VFP3 is supported the double values of the numbers have
5139 // been loaded into d7 and d6. Otherwise, the double values have been loaded
5140 // into r0, r1, r2, and r3.
5121 EmitSmiNonsmiComparison(masm, &lhs_not_nan, &slow, strict_); 5141 EmitSmiNonsmiComparison(masm, &lhs_not_nan, &slow, strict_);
5122 5142
5123 __ bind(&both_loaded_as_doubles); 5143 __ bind(&both_loaded_as_doubles);
5124 // r0, r1, r2, r3 are the double representations of the right hand side 5144 // The arguments have been converted to doubles and stored in d6 and d7, if
5125 // and the left hand side. 5145 // VFP3 is supported, or in r0, r1, r2, and r3.
5126
5127 if (CpuFeatures::IsSupported(VFP3)) { 5146 if (CpuFeatures::IsSupported(VFP3)) {
5128 __ bind(&lhs_not_nan); 5147 __ bind(&lhs_not_nan);
5129 CpuFeatures::Scope scope(VFP3); 5148 CpuFeatures::Scope scope(VFP3);
5130 Label no_nan; 5149 Label no_nan;
5131 // ARMv7 VFP3 instructions to implement double precision comparison. 5150 // ARMv7 VFP3 instructions to implement double precision comparison.
5132 __ vmov(d6, r0, r1);
5133 __ vmov(d7, r2, r3);
5134
5135 __ vcmp(d7, d6); 5151 __ vcmp(d7, d6);
5136 __ vmrs(pc); // Move vector status bits to normal status bits. 5152 __ vmrs(pc); // Move vector status bits to normal status bits.
5137 Label nan; 5153 Label nan;
5138 __ b(vs, &nan); 5154 __ b(vs, &nan);
5139 __ mov(r0, Operand(EQUAL), LeaveCC, eq); 5155 __ mov(r0, Operand(EQUAL), LeaveCC, eq);
5140 __ mov(r0, Operand(LESS), LeaveCC, lt); 5156 __ mov(r0, Operand(LESS), LeaveCC, lt);
5141 __ mov(r0, Operand(GREATER), LeaveCC, gt); 5157 __ mov(r0, Operand(GREATER), LeaveCC, gt);
5142 __ mov(pc, Operand(lr)); 5158 __ mov(pc, Operand(lr));
5143 5159
5144 __ bind(&nan); 5160 __ bind(&nan);
(...skipping 121 matching lines...)
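In C terms, the vcmp/vmrs sequence plus the three conditional movs computes an ordered three-way comparison, with the vs branch diverting the unordered (NaN) case. LESS, EQUAL, and GREATER are -1, 0, and 1, the same convention the runtime comment near the end of this file quotes:

    enum CompareResult { LESS = -1, EQUAL = 0, GREATER = 1 };

    // Returns false in the unordered case, mirroring the "b(vs, &nan)" branch.
    bool ThreeWayCompare(double lhs, double rhs, CompareResult* result) {
      if (lhs != lhs || rhs != rhs) return false;  // at least one NaN
      *result = lhs < rhs ? LESS : (lhs > rhs ? GREATER : EQUAL);
      return true;
    }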
5266 const Builtins::JavaScript& builtin, 5282 const Builtins::JavaScript& builtin,
5267 Token::Value operation, 5283 Token::Value operation,
5268 OverwriteMode mode) { 5284 OverwriteMode mode) {
5269 Label slow, slow_pop_2_first, do_the_call; 5285 Label slow, slow_pop_2_first, do_the_call;
5270 Label r0_is_smi, r1_is_smi, finished_loading_r0, finished_loading_r1; 5286 Label r0_is_smi, r1_is_smi, finished_loading_r0, finished_loading_r1;
5271 // Smi-smi case (overflow). 5287 // Smi-smi case (overflow).
5272 // Since both are Smis there is no heap number to overwrite, so allocate. 5288 // Since both are Smis there is no heap number to overwrite, so allocate.
5273 // The new heap number is in r5. r6 and r7 are scratch. 5289 // The new heap number is in r5. r6 and r7 are scratch.
5274 AllocateHeapNumber(masm, &slow, r5, r6, r7); 5290 AllocateHeapNumber(masm, &slow, r5, r6, r7);
5275 5291
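Context for the allocation: this entry point is reached when inlined smi arithmetic overflowed, and a 32-bit smi carries only 31 signed bits, so the true result needs a freshly allocated HeapNumber box. Illustrative range check:

    #include <cstdint>

    // 32-bit smis hold 31 signed bits; anything outside must be boxed.
    bool FitsInSmi(int64_t value) {
      return value >= -(INT64_C(1) << 30) && value < (INT64_C(1) << 30);
    }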
5276 if (CpuFeatures::IsSupported(VFP3) && Token::MOD != operation) { 5292 // If we have floating point hardware, inline ADD, SUB, MUL, and DIV,
5293 // using registers d7 and d6 for the double values.
5294 bool use_fp_registers = CpuFeatures::IsSupported(VFP3) &&
5295 Token::MOD != operation;
5296 if (use_fp_registers) {
5277 CpuFeatures::Scope scope(VFP3); 5297 CpuFeatures::Scope scope(VFP3);
5278 __ mov(r7, Operand(r0, ASR, kSmiTagSize)); 5298 __ mov(r7, Operand(r0, ASR, kSmiTagSize));
5279 __ vmov(s15, r7); 5299 __ vmov(s15, r7);
5280 __ vcvt(d7, s15); 5300 __ vcvt(d7, s15);
5281 __ mov(r7, Operand(r1, ASR, kSmiTagSize)); 5301 __ mov(r7, Operand(r1, ASR, kSmiTagSize));
5282 __ vmov(s13, r7); 5302 __ vmov(s13, r7);
5283 __ vcvt(d6, s13); 5303 __ vcvt(d6, s13);
5284 } else { 5304 } else {
5285 // Write Smi from r0 to r3 and r2 in double format. r6 is scratch. 5305 // Write Smi from r0 to r3 and r2 in double format. r6 is scratch.
5286 __ mov(r7, Operand(r0)); 5306 __ mov(r7, Operand(r0));
(...skipping 68 matching lines...)
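On the non-VFP path each double travels as a pair of 32-bit words, soft-float style: the first operand in r0/r1 and the second in r2/r3, as the calling-convention comments below spell out. The split, in portable terms:

    #include <cstdint>
    #include <cstring>

    void SplitDouble(double d, uint32_t* mantissa_word, uint32_t* exponent_word) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof(bits));
      *mantissa_word = static_cast<uint32_t>(bits);        // low word
      *exponent_word = static_cast<uint32_t>(bits >> 32);  // sign + exponent word
    }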
5355 } 5375 }
5356 5376
5357 // Move r0 to a double in r2-r3. 5377 // Move r0 to a double in r2-r3.
5358 __ tst(r0, Operand(kSmiTagMask)); 5378 __ tst(r0, Operand(kSmiTagMask));
5359 __ b(eq, &r0_is_smi); // It's a Smi so don't check it's a heap number. 5379 __ b(eq, &r0_is_smi); // It's a Smi so don't check it's a heap number.
5360 __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE); 5380 __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
5361 __ b(ne, &slow); 5381 __ b(ne, &slow);
5362 if (mode == OVERWRITE_RIGHT) { 5382 if (mode == OVERWRITE_RIGHT) {
5363 __ mov(r5, Operand(r0)); // Overwrite this heap number. 5383 __ mov(r5, Operand(r0)); // Overwrite this heap number.
5364 } 5384 }
5365 if (CpuFeatures::IsSupported(VFP3) && Token::MOD != operation) { 5385 if (use_fp_registers) {
5366 CpuFeatures::Scope scope(VFP3); 5386 CpuFeatures::Scope scope(VFP3);
5367 // Load the double from tagged HeapNumber r0 to d7. 5387 // Load the double from tagged HeapNumber r0 to d7.
5368 __ sub(r7, r0, Operand(kHeapObjectTag)); 5388 __ sub(r7, r0, Operand(kHeapObjectTag));
5369 __ vldr(d7, r7, HeapNumber::kValueOffset); 5389 __ vldr(d7, r7, HeapNumber::kValueOffset);
5370 } else { 5390 } else {
5371 // Calling convention says that second double is in r2 and r3. 5391 // Calling convention says that second double is in r2 and r3.
5372 __ ldr(r2, FieldMemOperand(r0, HeapNumber::kValueOffset)); 5392 __ ldr(r2, FieldMemOperand(r0, HeapNumber::kValueOffset));
5373 __ ldr(r3, FieldMemOperand(r0, HeapNumber::kValueOffset + 4)); 5393 __ ldr(r3, FieldMemOperand(r0, HeapNumber::kValueOffset + 4));
5374 } 5394 }
5375 __ jmp(&finished_loading_r0); 5395 __ jmp(&finished_loading_r0);
5376 __ bind(&r0_is_smi); 5396 __ bind(&r0_is_smi);
5377 if (mode == OVERWRITE_RIGHT) { 5397 if (mode == OVERWRITE_RIGHT) {
5378 // We can't overwrite a Smi so get address of new heap number into r5. 5398 // We can't overwrite a Smi so get address of new heap number into r5.
5379 AllocateHeapNumber(masm, &slow, r5, r6, r7); 5399 AllocateHeapNumber(masm, &slow, r5, r6, r7);
5380 } 5400 }
5381 5401
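The overwrite handling here and in the heap-number branch above follows one rule: reuse an operand's HeapNumber for the result when the mode permits, but a smi is unboxed and immutable, so that case always allocates. A sketch with illustrative names:

    #include <cstdint>

    uintptr_t PickResultSlot(uintptr_t operand, bool operand_is_smi,
                             bool mode_allows_overwrite,
                             uintptr_t freshly_allocated) {
      if (mode_allows_overwrite && !operand_is_smi) return operand;  // reuse box
      return freshly_allocated;  // a smi always needs a new HeapNumber
    }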
5382 if (CpuFeatures::IsSupported(VFP3) && Token::MOD != operation) { 5402 if (use_fp_registers) {
5383 CpuFeatures::Scope scope(VFP3); 5403 CpuFeatures::Scope scope(VFP3);
5384 // Convert smi in r0 to double in d7 5404 // Convert smi in r0 to double in d7.
5385 __ mov(r7, Operand(r0, ASR, kSmiTagSize)); 5405 __ mov(r7, Operand(r0, ASR, kSmiTagSize));
5386 __ vmov(s15, r7); 5406 __ vmov(s15, r7);
5387 __ vcvt(d7, s15); 5407 __ vcvt(d7, s15);
5388 } else { 5408 } else {
5389 // Write Smi from r0 to r3 and r2 in double format. 5409 // Write Smi from r0 to r3 and r2 in double format.
5390 __ mov(r7, Operand(r0)); 5410 __ mov(r7, Operand(r0));
5391 ConvertToDoubleStub stub3(r3, r2, r7, r6); 5411 ConvertToDoubleStub stub3(r3, r2, r7, r6);
5392 __ push(lr); 5412 __ push(lr);
5393 __ Call(stub3.GetCode(), RelocInfo::CODE_TARGET); 5413 __ Call(stub3.GetCode(), RelocInfo::CODE_TARGET);
5394 __ pop(lr); 5414 __ pop(lr);
5395 } 5415 }
5396 5416
5397 __ bind(&finished_loading_r0); 5417 __ bind(&finished_loading_r0);
5398 5418
5399 // Move r1 to a double in r0-r1. 5419 // Move r1 to a double in r0-r1.
5400 __ tst(r1, Operand(kSmiTagMask)); 5420 __ tst(r1, Operand(kSmiTagMask));
5401 __ b(eq, &r1_is_smi); // It's a Smi so don't check it's a heap number. 5421 __ b(eq, &r1_is_smi); // It's a Smi so don't check it's a heap number.
5402 __ CompareObjectType(r1, r4, r4, HEAP_NUMBER_TYPE); 5422 __ CompareObjectType(r1, r4, r4, HEAP_NUMBER_TYPE);
5403 __ b(ne, &slow); 5423 __ b(ne, &slow);
5404 if (mode == OVERWRITE_LEFT) { 5424 if (mode == OVERWRITE_LEFT) {
5405 __ mov(r5, Operand(r1)); // Overwrite this heap number. 5425 __ mov(r5, Operand(r1)); // Overwrite this heap number.
5406 } 5426 }
5407 if (CpuFeatures::IsSupported(VFP3) && Token::MOD != operation) { 5427 if (use_fp_registers) {
5408 CpuFeatures::Scope scope(VFP3); 5428 CpuFeatures::Scope scope(VFP3);
5409 // Load the double from tagged HeapNumber r1 to d6. 5429 // Load the double from tagged HeapNumber r1 to d6.
5410 __ sub(r7, r1, Operand(kHeapObjectTag)); 5430 __ sub(r7, r1, Operand(kHeapObjectTag));
5411 __ vldr(d6, r7, HeapNumber::kValueOffset); 5431 __ vldr(d6, r7, HeapNumber::kValueOffset);
5412 } else { 5432 } else {
5413 // Calling convention says that first double is in r0 and r1. 5433 // Calling convention says that first double is in r0 and r1.
5414 __ ldr(r0, FieldMemOperand(r1, HeapNumber::kValueOffset)); 5434 __ ldr(r0, FieldMemOperand(r1, HeapNumber::kValueOffset));
5415 __ ldr(r1, FieldMemOperand(r1, HeapNumber::kValueOffset + 4)); 5435 __ ldr(r1, FieldMemOperand(r1, HeapNumber::kValueOffset + 4));
5416 } 5436 }
5417 __ jmp(&finished_loading_r1); 5437 __ jmp(&finished_loading_r1);
5418 __ bind(&r1_is_smi); 5438 __ bind(&r1_is_smi);
5419 if (mode == OVERWRITE_LEFT) { 5439 if (mode == OVERWRITE_LEFT) {
5420 // We can't overwrite a Smi so get address of new heap number into r5. 5440 // We can't overwrite a Smi so get address of new heap number into r5.
5421 AllocateHeapNumber(masm, &slow, r5, r6, r7); 5441 AllocateHeapNumber(masm, &slow, r5, r6, r7);
5422 } 5442 }
5423 5443
5424 if (CpuFeatures::IsSupported(VFP3) && Token::MOD != operation) { 5444 if (use_fp_registers) {
5425 CpuFeatures::Scope scope(VFP3); 5445 CpuFeatures::Scope scope(VFP3);
5426 // Convert smi in r1 to double in d6 5446 // Convert smi in r1 to double in d6.
5427 __ mov(r7, Operand(r1, ASR, kSmiTagSize)); 5447 __ mov(r7, Operand(r1, ASR, kSmiTagSize));
5428 __ vmov(s13, r7); 5448 __ vmov(s13, r7);
5429 __ vcvt(d6, s13); 5449 __ vcvt(d6, s13);
5430 } else { 5450 } else {
5431 // Write Smi from r1 to r1 and r0 in double format. 5451 // Write Smi from r1 to r1 and r0 in double format.
5432 __ mov(r7, Operand(r1)); 5452 __ mov(r7, Operand(r1));
5433 ConvertToDoubleStub stub4(r1, r0, r7, r6); 5453 ConvertToDoubleStub stub4(r1, r0, r7, r6);
5434 __ push(lr); 5454 __ push(lr);
5435 __ Call(stub4.GetCode(), RelocInfo::CODE_TARGET); 5455 __ Call(stub4.GetCode(), RelocInfo::CODE_TARGET);
5436 __ pop(lr); 5456 __ pop(lr);
5437 } 5457 }
5438 5458
5439 __ bind(&finished_loading_r1); 5459 __ bind(&finished_loading_r1);
5440 5460
5441 __ bind(&do_the_call); 5461 __ bind(&do_the_call);
5442 // If we are inlining the operation using VFP3 instructions for 5462 // If we are inlining the operation using VFP3 instructions for
5443 // add, subtract, multiply, or divide, the arguments are in d6 and d7. 5463 // add, subtract, multiply, or divide, the arguments are in d6 and d7.
5444 if (CpuFeatures::IsSupported(VFP3) && 5464 if (use_fp_registers) {
5445 ((Token::MUL == operation) ||
5446 (Token::DIV == operation) ||
5447 (Token::ADD == operation) ||
5448 (Token::SUB == operation))) {
5449 CpuFeatures::Scope scope(VFP3); 5465 CpuFeatures::Scope scope(VFP3);
5450 // ARMv7 VFP3 instructions to implement 5466 // ARMv7 VFP3 instructions to implement
5451 // double precision, add, subtract, multiply, divide. 5467 // double precision, add, subtract, multiply, divide.
5452 5468
5453 if (Token::MUL == operation) { 5469 if (Token::MUL == operation) {
5454 __ vmul(d5, d6, d7); 5470 __ vmul(d5, d6, d7);
5455 } else if (Token::DIV == operation) { 5471 } else if (Token::DIV == operation) {
5456 __ vdiv(d5, d6, d7); 5472 __ vdiv(d5, d6, d7);
5457 } else if (Token::ADD == operation) { 5473 } else if (Token::ADD == operation) {
5458 __ vadd(d5, d6, d7); 5474 __ vadd(d5, d6, d7);
(...skipping 1411 matching lines...)
6870 // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater) 6886 // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
6871 // tagged as a small integer. 6887 // tagged as a small integer.
6872 __ bind(&runtime); 6888 __ bind(&runtime);
6873 __ TailCallRuntime(ExternalReference(Runtime::kStringCompare), 2, 1); 6889 __ TailCallRuntime(ExternalReference(Runtime::kStringCompare), 2, 1);
6874 } 6890 }
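"Tagged as a small integer" above means the runtime's -1/0/1 comes back in smi form, shifted left by the tag size, so the stub can return it unchanged. Sketch, assuming kSmiTagSize == 1 and kSmiTag == 0:

    #include <cstdint>

    uint32_t SmiFromInt(int32_t value) {
      return static_cast<uint32_t>(value) << 1;  // -1 -> 0xFFFFFFFE, 0 -> 0, 1 -> 2
    }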
6875 6891
6876 6892
6877 #undef __ 6893 #undef __
6878 6894
6879 } } // namespace v8::internal 6895 } } // namespace v8::internal