| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include <limits.h> // For LONG_MIN, LONG_MAX. | 5 #include <limits.h> // For LONG_MIN, LONG_MAX. |
| 6 | 6 |
| 7 #if V8_TARGET_ARCH_ARM | 7 #if V8_TARGET_ARCH_ARM |
| 8 | 8 |
| 9 #include "src/base/bits.h" | 9 #include "src/base/bits.h" |
| 10 #include "src/base/division-by-constant.h" | 10 #include "src/base/division-by-constant.h" |
| (...skipping 840 matching lines...) |
| 851 | 851 |
| 852 void MacroAssembler::Ldrd(Register dst1, Register dst2, | 852 void MacroAssembler::Ldrd(Register dst1, Register dst2, |
| 853 const MemOperand& src, Condition cond) { | 853 const MemOperand& src, Condition cond) { |
| 854 DCHECK(src.rm().is(no_reg)); | 854 DCHECK(src.rm().is(no_reg)); |
| 855 DCHECK(!dst1.is(lr)); // r14. | 855 DCHECK(!dst1.is(lr)); // r14. |
| 856 | 856 |
| 857 // V8 does not use this addressing mode, so the fallback code | 857 // V8 does not use this addressing mode, so the fallback code |
| 858 // below doesn't support it yet. | 858 // below doesn't support it yet. |
| 859 DCHECK((src.am() != PreIndex) && (src.am() != NegPreIndex)); | 859 DCHECK((src.am() != PreIndex) && (src.am() != NegPreIndex)); |
| 860 | 860 |
| 861 // Generate two ldr instructions if ldrd is not available. | 861 // Generate two ldr instructions if ldrd is not applicable. |
| 862 if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size() && | 862 if ((dst1.code() % 2 == 0) && (dst1.code() + 1 == dst2.code())) { |
| 863 (dst1.code() % 2 == 0) && (dst1.code() + 1 == dst2.code())) { | |
| 864 CpuFeatureScope scope(this, ARMv7); | |
| 865 ldrd(dst1, dst2, src, cond); | 863 ldrd(dst1, dst2, src, cond); |
| 866 } else { | 864 } else { |
| 867 if ((src.am() == Offset) || (src.am() == NegOffset)) { | 865 if ((src.am() == Offset) || (src.am() == NegOffset)) { |
| 868 MemOperand src2(src); | 866 MemOperand src2(src); |
| 869 src2.set_offset(src2.offset() + 4); | 867 src2.set_offset(src2.offset() + 4); |
| 870 if (dst1.is(src.rn())) { | 868 if (dst1.is(src.rn())) { |
| 871 ldr(dst2, src2, cond); | 869 ldr(dst2, src2, cond); |
| 872 ldr(dst1, src, cond); | 870 ldr(dst1, src, cond); |
| 873 } else { | 871 } else { |
| 874 ldr(dst1, src, cond); | 872 ldr(dst1, src, cond); |
| (...skipping 17 matching lines...) |
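
For context on the register-pair gate that both sides check: ARM's ldrd can only encode a destination pair where the first register is even-numbered and the second is its immediate successor (r0/r1, r2/r3, ...). When the pair does not qualify, the fallback emits two ldr instructions, and their order matters if dst1 aliases the base register. A minimal sketch of that hazard, plain C++ rather than V8 code, with the register file modeled as an array of 32-bit slots and all names hypothetical:

    #include <cstdint>
    #include <cstring>

    // 'reg' models the register file indexed by register code; 'rn' is the
    // code of the base register holding the load address.
    void LdrdFallback(uint32_t reg[], int dst1, int dst2, int rn,
                      const uint8_t* mem, int32_t offset) {
      if (dst1 == rn) {
        // Loading dst1 first would clobber the base before the second ldr
        // computes its address, so load the high word first.
        std::memcpy(&reg[dst2], mem + reg[rn] + offset + 4, 4);
        std::memcpy(&reg[dst1], mem + reg[rn] + offset, 4);
      } else {
        std::memcpy(&reg[dst1], mem + reg[rn] + offset, 4);
        std::memcpy(&reg[dst2], mem + reg[rn] + offset + 4, 4);
      }
    }
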
| 892 | 890 |
| 893 void MacroAssembler::Strd(Register src1, Register src2, | 891 void MacroAssembler::Strd(Register src1, Register src2, |
| 894 const MemOperand& dst, Condition cond) { | 892 const MemOperand& dst, Condition cond) { |
| 895 DCHECK(dst.rm().is(no_reg)); | 893 DCHECK(dst.rm().is(no_reg)); |
| 896 DCHECK(!src1.is(lr)); // r14. | 894 DCHECK(!src1.is(lr)); // r14. |
| 897 | 895 |
| 898 // V8 does not use this addressing mode, so the fallback code | 896 // V8 does not use this addressing mode, so the fallback code |
| 899 // below doesn't support it yet. | 897 // below doesn't support it yet. |
| 900 DCHECK((dst.am() != PreIndex) && (dst.am() != NegPreIndex)); | 898 DCHECK((dst.am() != PreIndex) && (dst.am() != NegPreIndex)); |
| 901 | 899 |
| 902 // Generate two str instructions if strd is not available. | 900 // Generate two str instructions if strd is not applicable. |
| 903 if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size() && | 901 if ((src1.code() % 2 == 0) && (src1.code() + 1 == src2.code())) { |
| 904 (src1.code() % 2 == 0) && (src1.code() + 1 == src2.code())) { | |
| 905 CpuFeatureScope scope(this, ARMv7); | |
| 906 strd(src1, src2, dst, cond); | 902 strd(src1, src2, dst, cond); |
| 907 } else { | 903 } else { |
| 908 MemOperand dst2(dst); | 904 MemOperand dst2(dst); |
| 909 if ((dst.am() == Offset) || (dst.am() == NegOffset)) { | 905 if ((dst.am() == Offset) || (dst.am() == NegOffset)) { |
| 910 dst2.set_offset(dst2.offset() + 4); | 906 dst2.set_offset(dst2.offset() + 4); |
| 911 str(src1, dst, cond); | 907 str(src1, dst, cond); |
| 912 str(src2, dst2, cond); | 908 str(src2, dst2, cond); |
| 913 } else { // PostIndex or NegPostIndex. | 909 } else { // PostIndex or NegPostIndex. |
| 914 DCHECK((dst.am() == PostIndex) || (dst.am() == NegPostIndex)); | 910 DCHECK((dst.am() == PostIndex) || (dst.am() == NegPostIndex)); |
| 915 dst2.set_offset(dst2.offset() - 4); | 911 dst2.set_offset(dst2.offset() - 4); |
| (...skipping 3095 matching lines...) |
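
The post-index branch above pre-compensates the second store's offset by -4 because, by the time it runs, the first store will already have advanced the base by one word. A minimal sketch of how the writeback splits across the two str instructions, plain C++ rather than V8 code, assuming the first store post-indexes the base by 4 as the -4 adjustment suggests:

    #include <cstdint>
    #include <cstring>

    // Emulates a post-indexed strd with two post-indexed str instructions;
    // 'rn' models the base register, 'mem' the address space.
    void StrdPostIndexFallback(uint32_t src1, uint32_t src2, uint8_t* mem,
                               uint32_t& rn, int32_t offset) {
      std::memcpy(mem + rn, &src1, 4);  // str src1, [rn], #4
      rn += 4;
      std::memcpy(mem + rn, &src2, 4);  // str src2, [rn], #(offset - 4)
      rn += offset - 4;                 // net writeback equals 'offset'
    }
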
| 4011 } | 4007 } |
| 4012 } | 4008 } |
| 4013 if (mag.shift > 0) mov(result, Operand(result, ASR, mag.shift)); | 4009 if (mag.shift > 0) mov(result, Operand(result, ASR, mag.shift)); |
| 4014 add(result, result, Operand(dividend, LSR, 31)); | 4010 add(result, result, Operand(dividend, LSR, 31)); |
| 4015 } | 4011 } |
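
The two instructions closing this function are the round-toward-zero correction of the classic magic-number division: after multiplying by the precomputed constant and arithmetic-shifting, adding the dividend's sign bit (dividend LSR 31 is 1 only for negative inputs) turns the floor-style quotient into a truncating one, matching C semantics. A minimal sketch of the whole pattern, plain C++ rather than V8 code, where 'magic' and 'shift' stand for constants like those from src/base/division-by-constant.h:

    #include <cstdint>

    int32_t TruncatingDiv(int32_t dividend, int32_t magic, int shift) {
      // High 32 bits of the 64-bit product (computed before the visible
      // tail). Some divisors also need the dividend added or subtracted
      // here; that branch sits in the skipped lines.
      int32_t result =
          static_cast<int32_t>((static_cast<int64_t>(dividend) * magic) >> 32);
      if (shift > 0) result >>= shift;  // Operand(result, ASR, mag.shift)
      // Operand(dividend, LSR, 31): add 1 iff the dividend is negative.
      result += static_cast<int32_t>(static_cast<uint32_t>(dividend) >> 31);
      return result;
    }

For example, with the well-known constants magic = 0x55555556 and shift = 0 for dividing by 3, TruncatingDiv(-7, 0x55555556, 0) yields -2 rather than the floored -3.
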
| 4016 | 4012 |
| 4017 } // namespace internal | 4013 } // namespace internal |
| 4018 } // namespace v8 | 4014 } // namespace v8 |
| 4019 | 4015 |
| 4020 #endif // V8_TARGET_ARCH_ARM | 4016 #endif // V8_TARGET_ARCH_ARM |