Chromium Code Reviews

Unified Diff: src/arm/codegen-arm.cc

Issue 2826001: ARM: Be smarter about switching instructions when immediates... (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: Created 10 years, 6 months ago
 // Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 1185 matching lines...)
     }


     case Token::BIT_OR:
     case Token::BIT_XOR:
     case Token::BIT_AND: {
       if (both_sides_are_smi) {
         switch (op) {
           case Token::BIT_OR:  __ orr(tos, tos, Operand(value)); break;
           case Token::BIT_XOR: __ eor(tos, tos, Operand(value)); break;
-          case Token::BIT_AND: __ and_(tos, tos, Operand(value)); break;
+          case Token::BIT_AND: __ And(tos, tos, Operand(value)); break;
           default: UNREACHABLE();
         }
         frame_->EmitPush(tos, TypeInfo::Smi());
       } else {
         DeferredCode* deferred =
             new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);
         __ tst(tos, Operand(kSmiTagMask));
         deferred->Branch(ne);
         switch (op) {
           case Token::BIT_OR:  __ orr(tos, tos, Operand(value)); break;
           case Token::BIT_XOR: __ eor(tos, tos, Operand(value)); break;
-          case Token::BIT_AND: __ and_(tos, tos, Operand(value)); break;
+          case Token::BIT_AND: __ And(tos, tos, Operand(value)); break;
           default: UNREACHABLE();
         }
         deferred->BindExit();
         TypeInfo result_type =
             (op == Token::BIT_AND) ? TypeInfo::Smi() : TypeInfo::Integer32();
         frame_->EmitPush(tos, result_type);
       }
       break;
     }

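Note on and_ versus And: the lowercase and_ emits a raw AND instruction, whose immediate operand must be an 8-bit value rotated right by an even amount. The capitalized And is a macro-assembler helper that is free to pick a cheaper sequence when the mask does not encode, presumably bic with the inverted mask or, on ARMv7, ubfx for a low-bit mask. A standalone C++ sketch of that encodability test (helper names here are illustrative, not from the patch):

    #include <cstdint>
    #include <cstdio>
    #include <initializer_list>

    // Can |imm| be encoded as an ARM data-processing immediate?  Such an
    // immediate is an 8-bit value rotated right by an even amount, which is
    // why a raw AND cannot take an arbitrary 32-bit mask.
    bool FitsArmImmediate(uint32_t imm) {
      if (imm <= 0xFFu) return true;  // rotation of zero
      for (int rot = 2; rot < 32; rot += 2) {
        // Rotating left by |rot| undoes a rotate-right-by-|rot| encoding.
        uint32_t undone = (imm << rot) | (imm >> (32 - rot));
        if (undone <= 0xFFu) return true;
      }
      return false;
    }

    // True when only the n lowest bits are set (0x1, 0x3, ..., 0x1ff, ...);
    // ARMv7 can handle such masks with ubfx from bit 0.
    bool IsLowBitMask(uint32_t m) { return m != 0 && (m & (m + 1)) == 0; }

    int main() {
      // 0x1ff (a 9-bit mask, like kCacheSize - 1 further down) fits neither
      // directly nor inverted, but it is a low-bit mask, so ubfx covers it.
      for (uint32_t mask : {0xFF000000u, 0x1FFu, 0x12345u}) {
        printf("%#10x: and-imm=%d bic-imm=%d low-bit-mask=%d\n",
               (unsigned)mask, (int)FitsArmImmediate(mask),
               (int)FitsArmImmediate(~mask), (int)IsLowBitMask(mask));
      }
      return 0;
    }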
(...skipping 5392 matching lines...)
   __ orr(exponent, exponent, Operand(exponent_word_for_1), LeaveCC, eq);
   // 1, 0 and -1 all have 0 for the second word.
   __ mov(mantissa, Operand(0));
   __ Ret();

   __ bind(&not_special);
   // Count leading zeros.  Uses mantissa for a scratch register on pre-ARM5.
   // Gets the wrong answer for 0, but we already checked for that case above.
   __ CountLeadingZeros(source_, mantissa, zeros_);
   // Compute exponent and or it into the exponent register.
-  // We use mantissa as a scratch register here.
-  __ rsb(mantissa, zeros_, Operand(31 + HeapNumber::kExponentBias));
+  // We use mantissa as a scratch register here.  Use a fudge factor to
+  // divide the constant 31 + HeapNumber::kExponentBias, 0x41e, into two parts
+  // that fit in the ARM's constant field.
+  int fudge = 0x400;
+  __ rsb(mantissa, zeros_, Operand(31 + HeapNumber::kExponentBias - fudge));
+  __ add(mantissa, mantissa, Operand(fudge));
   __ orr(exponent,
          exponent,
          Operand(mantissa, LSL, HeapNumber::kExponentShift));
   // Shift up the source chopping the top bit off.
   __ add(zeros_, zeros_, Operand(1));
   // This wouldn't work for 1.0 or -1.0 as the shift would be 32 which means 0.
   __ mov(source_, Operand(source_, LSL, zeros_));
   // Compute lower part of fraction (last 12 bits).
   __ mov(mantissa, Operand(source_, LSL, HeapNumber::kMantissaBitsInTopWord));
   // And the top (top 20 bits).
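For reference, a hedged C-level rendition of what the not_special path assembles (1023 and 20 mirror HeapNumber::kExponentBias and kMantissaBitsInTopWord; __builtin_clz is a GCC/Clang stand-in for CountLeadingZeros; 0, 1 and -1 are assumed already peeled off by the special-case code above, which is also why the shift by zeros + 1 never reaches 32 here):

    #include <cassert>
    #include <cstdint>
    #include <cstdio>
    #include <cstring>
    #include <initializer_list>

    // Build the IEEE-754 bit pattern for a positive int32 greater than 1.
    uint64_t PositiveInt32ToDoubleBits(uint32_t value) {
      assert(value > 1);
      int zeros = __builtin_clz(value);         // CountLeadingZeros
      // 31 + kExponentBias is 0x41e; the stub reaches it via rsb then add
      // because the constant does not fit in an ARM immediate field.
      uint32_t exponent = 31 + 1023 - zeros;
      uint32_t shifted = value << (zeros + 1);  // shift up, chop the top bit off
      uint32_t high = (exponent << 20) | (shifted >> 12);  // exponent, top 20 bits
      uint32_t low = shifted << 20;             // lower 12 bits of the fraction
      return ((uint64_t)high << 32) | low;
    }

    int main() {
      for (uint32_t v : {2u, 42u, 0x7FFFFFFFu}) {
        uint64_t bits = PositiveInt32ToDoubleBits(v);
        double d;
        memcpy(&d, &bits, sizeof d);
        printf("%u -> %.17g\n", (unsigned)v, d);  // round-trips exactly
      }
      return 0;
    }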
(...skipping 52 matching lines...)

 // Handle the case where the lhs and rhs are the same object.
 // Equality is almost reflexive (everything but NaN), so this is a test
 // for "identity and not NaN".
 static void EmitIdenticalObjectComparison(MacroAssembler* masm,
                                           Label* slow,
                                           Condition cc,
                                           bool never_nan_nan) {
   Label not_identical;
   Label heap_number, return_equal;
-  Register exp_mask_reg = r5;
   __ cmp(r0, r1);
   __ b(ne, &not_identical);

   // The two objects are identical.  If we know that one of them isn't NaN then
   // we now know they test equal.
   if (cc != eq || !never_nan_nan) {
-    __ mov(exp_mask_reg, Operand(HeapNumber::kExponentMask));
-
     // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
     // so we do the second best thing - test it ourselves.
     // They are both equal and they are not both Smis so both of them are not
     // Smis.  If it's not a heap number, then return equal.
     if (cc == lt || cc == gt) {
       __ CompareObjectType(r0, r4, r4, FIRST_JS_OBJECT_TYPE);
       __ b(ge, slow);
     } else {
       __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
       __ b(eq, &heap_number);
(...skipping 40 matching lines...)
   if (cc != lt && cc != gt) {
     __ bind(&heap_number);
     // It is a heap number, so return non-equal if it's NaN and equal if it's
     // not NaN.

     // The representation of NaN values has all exponent bits (52..62) set,
     // and not all mantissa bits (0..51) clear.
     // Read top bits of double representation (second word of value).
     __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
     // Test that exponent bits are all set.
-    __ and_(r3, r2, Operand(exp_mask_reg));
-    __ cmp(r3, Operand(exp_mask_reg));
+    __ Sbfx(r3, r2, HeapNumber::kExponentShift, HeapNumber::kExponentBits);
+    // NaNs have all-one exponents so they sign extend to -1.
+    __ cmp(r3, Operand(-1));
     __ b(ne, &return_equal);

     // Shift out flag and all exponent bits, retaining only mantissa.
     __ mov(r2, Operand(r2, LSL, HeapNumber::kNonMantissaBitsInTopWord));
     // Or with all low-bits of mantissa.
     __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
     __ orr(r0, r3, Operand(r2), SetCC);
     // For equal we already have the right value in r0:  Return zero (equal)
     // if all bits in mantissa are zero (it's an Infinity) and non-zero if
     // not (it's a NaN).  For <= and >= we need to load r0 with the failing
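The Sbfx change is the heart of this hunk: extracting the exponent as a signed bitfield makes an all-ones exponent (NaN or infinity) come out as -1, so one cmp replaces the mask-load-and-compare and frees r5. A hedged C++ sketch of the same test (20 and 11 mirror HeapNumber::kExponentShift and kExponentBits; the arithmetic right shift of a negative value is implementation-defined in C++ but behaves like sbfx on mainstream compilers):

    #include <cmath>
    #include <cstdint>
    #include <cstdio>
    #include <cstring>
    #include <initializer_list>

    // sbfx rd, rn, #lsb, #width: extract a signed bitfield.
    int32_t SignedBitfieldExtract(uint32_t word, int lsb, int width) {
      // Move the field to the top, then arithmetic-shift it back down.
      return (int32_t)(word << (32 - lsb - width)) >> (32 - width);
    }

    // The NaN test from EmitIdenticalObjectComparison: all-one exponent
    // (sign extends to -1) plus at least one mantissa bit set.
    bool IsNaNBits(uint64_t bits) {
      uint32_t high = (uint32_t)(bits >> 32);
      uint32_t low = (uint32_t)bits;
      if (SignedBitfieldExtract(high, 20, 11) != -1) return false;
      // Shift out sign and exponent; a remaining bit means NaN, none means Inf.
      return (high << 12) != 0 || low != 0;
    }

    int main() {
      for (double d : {1.5, double(INFINITY), double(NAN)}) {
        uint64_t bits;
        memcpy(&bits, &d, sizeof bits);
        printf("%g: IsNaNBits=%d std::isnan=%d\n", d, (int)IsNaNBits(bits),
               (int)std::isnan(d));
      }
      return 0;
    }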
(...skipping 100 matching lines...)
 }


 void EmitNanCheck(MacroAssembler* masm, Label* lhs_not_nan, Condition cc) {
   bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
   Register rhs_exponent = exp_first ? r0 : r1;
   Register lhs_exponent = exp_first ? r2 : r3;
   Register rhs_mantissa = exp_first ? r1 : r0;
   Register lhs_mantissa = exp_first ? r3 : r2;
   Label one_is_nan, neither_is_nan;
-  Label lhs_not_nan_exp_mask_is_loaded;
-
-  Register exp_mask_reg = r5;
-
-  __ mov(exp_mask_reg, Operand(HeapNumber::kExponentMask));
-  __ and_(r4, lhs_exponent, Operand(exp_mask_reg));
-  __ cmp(r4, Operand(exp_mask_reg));
-  __ b(ne, &lhs_not_nan_exp_mask_is_loaded);
+  __ Sbfx(r4, lhs_exponent, HeapNumber::kExponentShift, HeapNumber::kExponentBits);

Søren Thygesen Gjesse 2010/06/14 07:56:47 Long line.
Erik Corry 2010/06/14 21:05:46 Done.

+  // NaNs have all-one exponents so they sign extend to -1.
+  __ cmp(r4, Operand(-1));
+  __ b(ne, lhs_not_nan);
   __ mov(r4,
          Operand(lhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord),
          SetCC);
   __ b(ne, &one_is_nan);
   __ cmp(lhs_mantissa, Operand(0));
   __ b(ne, &one_is_nan);

   __ bind(lhs_not_nan);
-  __ mov(exp_mask_reg, Operand(HeapNumber::kExponentMask));
-  __ bind(&lhs_not_nan_exp_mask_is_loaded);
-  __ and_(r4, rhs_exponent, Operand(exp_mask_reg));
-  __ cmp(r4, Operand(exp_mask_reg));
+  __ Sbfx(r4, rhs_exponent, HeapNumber::kExponentShift, HeapNumber::kExponentBits);

Søren Thygesen Gjesse 2010/06/14 07:56:47 Long line.
Erik Corry 2010/06/14 21:05:46 Done.

+  // NaNs have all-one exponents so they sign extend to -1.
+  __ cmp(r4, Operand(-1));
   __ b(ne, &neither_is_nan);
   __ mov(r4,
          Operand(rhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord),
          SetCC);
   __ b(ne, &one_is_nan);
   __ cmp(rhs_mantissa, Operand(0));
   __ b(eq, &neither_is_nan);

   __ bind(&one_is_nan);
   // NaN comparisons always fail.
(...skipping 700 matching lines...)
 static void GetInt32(MacroAssembler* masm,
                      Register source,
                      Register dest,
                      Register scratch,
                      Register scratch2,
                      Label* slow) {
   Label right_exponent, done;
   // Get exponent word.
   __ ldr(scratch, FieldMemOperand(source, HeapNumber::kExponentOffset));
   // Get exponent alone in scratch2.
-  __ and_(scratch2, scratch, Operand(HeapNumber::kExponentMask));
+  __ Ubfx(scratch2, scratch, HeapNumber::kExponentShift, HeapNumber::kExponentBits);

Søren Thygesen Gjesse 2010/06/14 07:56:47 Long line.
Erik Corry 2010/06/14 21:05:46 Done.

   // Load dest with zero.  We use this either for the final shift or
   // for the answer.
   __ mov(dest, Operand(0));
   // Check whether the exponent matches a 32 bit signed int that is not a Smi.
   // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased).  This is
   // the exponent that we are fastest at and also the highest exponent we can
   // handle here.
-  const uint32_t non_smi_exponent =
-      (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
-  __ cmp(scratch2, Operand(non_smi_exponent));
+  const uint32_t non_smi_exponent = HeapNumber::kExponentBias + 30;
+  // The non_smi_exponent, 0x41d, is too big for ARM's immediate field so we
+  // split it up to avoid a constant pool entry.  You can't do that in general
+  // for cmp because of the overflow flag, but we know the exponent is in the
+  // range 0-2047 so there is no overflow.
+  int fudge_factor = 0x400;
+  __ sub(scratch2, scratch2, Operand(fudge_factor));
+  __ cmp(scratch2, Operand(non_smi_exponent - fudge_factor));
   // If we have a match of the int32-but-not-Smi exponent then skip some logic.
   __ b(eq, &right_exponent);
   // If the exponent is higher than that then go to slow case.  This catches
   // numbers that don't fit in a signed int32, infinities and NaNs.
   __ b(gt, slow);

   // We know the exponent is smaller than 30 (biased).  If it is less than
   // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, ie
   // it rounds to zero.
-  const uint32_t zero_exponent =
-      (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift;
-  __ sub(scratch2, scratch2, Operand(zero_exponent), SetCC);
+  const uint32_t zero_exponent = HeapNumber::kExponentBias + 0;
+  __ sub(scratch2, scratch2, Operand(zero_exponent - fudge_factor), SetCC);
   // Dest already has a Smi zero.
   __ b(lt, &done);
   if (!CpuFeatures::IsSupported(VFP3)) {
-    // We have a shifted exponent between 0 and 30 in scratch2.
-    __ mov(dest, Operand(scratch2, LSR, HeapNumber::kExponentShift));
-    // We now have the exponent in dest.  Subtract from 30 to get
-    // how much to shift down.
-    __ rsb(dest, dest, Operand(30));
+    // We have an exponent between 0 and 30 in scratch2.  Subtract from 30 to
+    // get how much to shift down.
+    __ rsb(dest, scratch2, Operand(30));
   }
   __ bind(&right_exponent);
   if (CpuFeatures::IsSupported(VFP3)) {
     CpuFeatures::Scope scope(VFP3);
     // ARMv7 VFP3 instructions implementing double precision to integer
     // conversion using round to zero.
     __ ldr(scratch2, FieldMemOperand(source, HeapNumber::kMantissaOffset));
     __ vmov(d7, scratch2, scratch);
     __ vcvt_s32_f64(s15, d7);
     __ vmov(dest, s15);
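The overflow caveat in the new comment deserves spelling out: cmp sets the flags from a subtraction, so pre-subtracting a fudge factor only preserves the signed lt/gt answers when that subtraction cannot overflow; it holds here because the Ubfx above leaves an 11-bit exponent in the range 0-2047. A small self-checking sketch (names are illustrative):

    #include <cstdint>
    #include <cstdio>

    enum Rel { kLess, kEqual, kGreater };

    // cmp rn, #k sets flags from rn - k.  Subtracting an encodable fudge f
    // first and comparing against k - f gives the same answers, provided
    // rn - f cannot overflow.
    Rel CompareViaFudge(int32_t x, int32_t k, int32_t f) {
      int32_t lhs = x - f;   // __ sub(scratch2, scratch2, Operand(fudge_factor))
      int32_t rhs = k - f;   // folded into the cmp immediate at assembly time
      if (lhs == rhs) return kEqual;
      return lhs > rhs ? kGreater : kLess;
    }

    int main() {
      const int32_t kNonSmiExponent = 1023 + 30;  // 0x41d, not ARM-encodable
      const int32_t kFudge = 0x400;               // encodable, and so is 0x1d
      for (int32_t exp = 0; exp <= 2047; ++exp) {
        Rel direct = (exp == kNonSmiExponent) ? kEqual
                   : (exp > kNonSmiExponent) ? kGreater : kLess;
        if (direct != CompareViaFudge(exp, kNonSmiExponent, kFudge)) {
          printf("mismatch at exponent %d\n", exp);
          return 1;
        }
      }
      printf("fudged compare matches direct compare for exponents 0..2047\n");
      return 0;
    }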
(...skipping 598 matching lines...)

   __ bind(&loaded);
   // r2 = low 32 bits of double value
   // r3 = high 32 bits of double value
   // Compute hash:
   //   h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
   __ eor(r1, r2, Operand(r3));
   __ eor(r1, r1, Operand(r1, LSR, 16));
   __ eor(r1, r1, Operand(r1, LSR, 8));
   ASSERT(IsPowerOf2(TranscendentalCache::kCacheSize));
-  if (CpuFeatures::IsSupported(ARMv7)) {
-    const int kTranscendentalCacheSizeBits = 9;
-    ASSERT_EQ(1 << kTranscendentalCacheSizeBits,
-              TranscendentalCache::kCacheSize);
-    __ ubfx(r1, r1, 0, kTranscendentalCacheSizeBits);
-  } else {
-    __ and_(r1, r1, Operand(TranscendentalCache::kCacheSize - 1));
-  }
+  __ And(r1, r1, Operand(TranscendentalCache::kCacheSize - 1));

   // r2 = low 32 bits of double value.
   // r3 = high 32 bits of double value.
   // r1 = TranscendentalCache::hash(double value).
   __ mov(r0,
          Operand(ExternalReference::transcendental_cache_array_address()));
   // r0 points to cache array.
   __ ldr(r0, MemOperand(r0, type_ * sizeof(TranscendentalCache::caches_[0])));
   // r0 points to the cache for the type type_.
   // If NULL, the cache hasn't been initialized yet, so go through runtime.
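The And macro collapses the old ARMv7/pre-ARMv7 split: kCacheSize - 1 is 0x1ff, a 9-bit mask that does not fit an ARM immediate, so the macro presumably picks ubfx where available and falls back to a plain AND otherwise. The hash itself, as a hedged C++ sketch (kCacheSize = 512 matches the removed kTranscendentalCacheSizeBits of 9):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    const uint32_t kCacheSize = 512;

    uint32_t TranscendentalHash(double d) {
      uint64_t bits;
      memcpy(&bits, &d, sizeof bits);
      uint32_t low = (uint32_t)bits;           // r2
      uint32_t high = (uint32_t)(bits >> 32);  // r3
      uint32_t h = low ^ high;      // __ eor(r1, r2, Operand(r3))
      h ^= h >> 16;                 // __ eor(r1, r1, Operand(r1, LSR, 16))
      h ^= h >> 8;                  // __ eor(r1, r1, Operand(r1, LSR, 8))
      return h & (kCacheSize - 1);  // __ And(r1, r1, Operand(kCacheSize - 1))
    }

    int main() {
      printf("hash(1.0) = %u, hash(2.5) = %u\n",
             (unsigned)TranscendentalHash(1.0),
             (unsigned)TranscendentalHash(2.5));
      return 0;
    }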
(...skipping 2352 matching lines...)
   __ bind(&string_add_runtime);
   __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
 }


 #undef __

 } }  // namespace v8::internal

 #endif  // V8_TARGET_ARCH_ARM
