OLD | NEW |
---|---|
1 // Copyright (c) 1994-2006 Sun Microsystems Inc. | 1 // Copyright (c) 1994-2006 Sun Microsystems Inc. |
2 // All Rights Reserved. | 2 // All Rights Reserved. |
3 // | 3 // |
4 // Redistribution and use in source and binary forms, with or without | 4 // Redistribution and use in source and binary forms, with or without |
5 // modification, are permitted provided that the following conditions are | 5 // modification, are permitted provided that the following conditions are |
6 // met: | 6 // met: |
7 // | 7 // |
8 // - Redistributions of source code must retain the above copyright notice, | 8 // - Redistributions of source code must retain the above copyright notice, |
9 // this list of conditions and the following disclaimer. | 9 // this list of conditions and the following disclaimer. |
10 // | 10 // |
(...skipping 1964 matching lines...) | |
1975 Instr instr = PREF | (rs.rm().code() << kRsShift) | (hint << kRtShift) | 1975 Instr instr = PREF | (rs.rm().code() << kRsShift) | (hint << kRtShift) |
1976 | (rs.offset_); | 1976 | (rs.offset_); |
1977 emit(instr); | 1977 emit(instr); |
1978 } | 1978 } |
1979 | 1979 |
1980 | 1980 |
1981 // --------Coprocessor-instructions---------------- | 1981 // --------Coprocessor-instructions---------------- |
1982 | 1982 |
1983 // Load, store, move. | 1983 // Load, store, move. |
1984 void Assembler::lwc1(FPURegister fd, const MemOperand& src) { | 1984 void Assembler::lwc1(FPURegister fd, const MemOperand& src) { |
1985 GenInstrImmediate(LWC1, src.rm(), fd, src.offset_); | 1985 if (is_int16(src.offset_)) { |
1986 GenInstrImmediate(LWC1, src.rm(), fd, src.offset_); | |
1987 } else { // Offset > 16 bits, use multiple instructions to load. | |
1988 LoadRegPlusOffsetToAt(src); | |
1989 GenInstrImmediate(LWC1, at, fd, 0); | |
1990 } | |
1986 } | 1991 } |
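The new fallback path above relies on LoadRegPlusOffsetToAt to materialize an address whenever the offset does not fit in lwc1's signed 16-bit immediate field. The helper's body is not part of this diff; a minimal sketch of its likely shape, assuming the conventional lui/ori/addu sequence into the MIPS assembler scratch register at:

    void Assembler::LoadRegPlusOffsetToAt(const MemOperand& src) {
      DCHECK(!src.rm().is(at));               // at is about to be clobbered.
      lui(at, (src.offset_ >> 16) & 0xffff);  // Upper half of the 32-bit offset.
      ori(at, at, src.offset_ & 0xffff);      // Lower half.
      addu(at, at, src.rm());                 // at = base + offset.
    }

With the full address precomputed in at, the trailing load needs only a zero immediate, which is why the fallback emits GenInstrImmediate(LWC1, at, fd, 0).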
1987 | 1992 |
1988 | 1993 |
1989 void Assembler::ldc1(FPURegister fd, const MemOperand& src) { | 1994 void Assembler::ldc1(FPURegister fd, const MemOperand& src) { |
1990 // Workaround for non-8-byte alignment of HeapNumber, convert 64-bit | 1995 // Workaround for non-8-byte alignment of HeapNumber, convert 64-bit |
1991 // load to two 32-bit loads. | 1996 // load to two 32-bit loads. |
1992 if (IsFp64Mode()) { | 1997 if (IsFp64Mode()) { |
1993 GenInstrImmediate(LWC1, src.rm(), fd, src.offset_ + | 1998 if (is_int16(src.offset_) && is_int16(src.offset_ + kPointerSize)) { |
dusmil.imgtec
2015/04/28 10:31:13
No need for two is_int16 checks, later one is enough.
| |
1994 Register::kMantissaOffset); | 1999 GenInstrImmediate(LWC1, src.rm(), fd, |
1995 GenInstrImmediate(LW, src.rm(), at, src.offset_ + | 2000 src.offset_ + Register::kMantissaOffset); |
1996 Register::kExponentOffset); | 2001 GenInstrImmediate(LW, src.rm(), at, |
1997 mthc1(at, fd); | 2002 src.offset_ + Register::kExponentOffset); |
1998 } else { | 2003 mthc1(at, fd); |
1999 GenInstrImmediate(LWC1, src.rm(), fd, src.offset_ + | 2004 } else { // Offset > 16 bits, use multiple instructions to load. |
2000 Register::kMantissaOffset); | 2005 LoadRegPlusOffsetToAt(src); |
2001 FPURegister nextfpreg; | 2006 GenInstrImmediate(LWC1, at, fd, Register::kMantissaOffset); |
2002 nextfpreg.setcode(fd.code() + 1); | 2007 GenInstrImmediate(LW, at, at, Register::kExponentOffset); |
2003 GenInstrImmediate(LWC1, src.rm(), nextfpreg, src.offset_ + | 2008 mthc1(at, fd); |
2004 Register::kExponentOffset); | 2009 } |
2010 } else { // fp32 mode. | |
2011 if (is_int16(src.offset_) && is_int16(src.offset_ + kPointerSize)) { | |
2012 GenInstrImmediate(LWC1, src.rm(), fd, | |
2013 src.offset_ + Register::kMantissaOffset); | |
2014 FPURegister nextfpreg; | |
2015 nextfpreg.setcode(fd.code() + 1); | |
2016 GenInstrImmediate(LWC1, src.rm(), nextfpreg, | |
2017 src.offset_ + Register::kExponentOffset); | |
2018 } else { // Offset > 16 bits, use multiple instructions to load. | |
2019 LoadRegPlusOffsetToAt(src); | |
2020 GenInstrImmediate(LWC1, at, fd, Register::kMantissaOffset); | |
2021 FPURegister nextfpreg; | |
2022 nextfpreg.setcode(fd.code() + 1); | |
2023 GenInstrImmediate(LWC1, at, nextfpreg, Register::kExponentOffset); | |
2024 } | |
2005 } | 2025 } |
2006 } | 2026 } |
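The two branches differ in where the high word of the double ends up. In fp64 (FR=1) mode each FPURegister is a full 64-bit register, so the exponent word is inserted into the upper half with mthc1; in fp32 (FR=0) mode a double occupies an even/odd register pair, so the exponent word is simply loaded into fd + 1. A standalone sketch of the word split both paths depend on, assuming little-endian MIPS where Register::kMantissaOffset is 0 and kExponentOffset is 4 (big-endian targets swap the two):

    #include <cstdint>
    #include <cstring>

    // Sketch only: mirrors how ldc1 reassembles a double from two 32-bit loads.
    struct DoubleWords {
      uint32_t mantissa;  // Word at kMantissaOffset: low 32 bits of the pattern.
      uint32_t exponent;  // Word at kExponentOffset: sign, exponent, top of mantissa.
    };

    static DoubleWords Split(double d) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof bits);  // Bit pattern; little-endian host assumed.
      return {static_cast<uint32_t>(bits), static_cast<uint32_t>(bits >> 32)};
    }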
2007 | 2027 |
2008 | 2028 |
2009 void Assembler::swc1(FPURegister fd, const MemOperand& src) { | 2029 void Assembler::swc1(FPURegister fd, const MemOperand& src) { |
2010 GenInstrImmediate(SWC1, src.rm(), fd, src.offset_); | 2030 if (is_int16(src.offset_)) { |
2031 GenInstrImmediate(SWC1, src.rm(), fd, src.offset_); | |
2032 } else { // Offset > 16 bits, use multiple instructions to store. | |
2033 LoadRegPlusOffsetToAt(src); | |
2034 GenInstrImmediate(SWC1, at, fd, 0); | |
2035 } | |
2011 } | 2036 } |
2012 | 2037 |
2013 | 2038 |
2014 void Assembler::sdc1(FPURegister fd, const MemOperand& src) { | 2039 void Assembler::sdc1(FPURegister fd, const MemOperand& src) { |
2015 // Workaround for non-8-byte alignment of HeapNumber, convert 64-bit | 2040 // Workaround for non-8-byte alignment of HeapNumber, convert 64-bit |
2016 // store to two 32-bit stores. | 2041 // store to two 32-bit stores. |
2017 if (IsFp64Mode()) { | 2042 if (IsFp64Mode()) { |
2018 GenInstrImmediate(SWC1, src.rm(), fd, src.offset_ + | 2043 if (is_int16(src.offset_) && is_int16(src.offset_ + kPointerSize)) { |
2019 Register::kMantissaOffset); | 2044 GenInstrImmediate(SWC1, src.rm(), fd, |
2020 mfhc1(at, fd); | 2045 src.offset_ + Register::kMantissaOffset); |
2021 GenInstrImmediate(SW, src.rm(), at, src.offset_ + | 2046 mfhc1(at, fd); |
2022 Register::kExponentOffset); | 2047 GenInstrImmediate(SW, src.rm(), at, |
2023 } else { | 2048 src.offset_ + Register::kExponentOffset); |
2024 GenInstrImmediate(SWC1, src.rm(), fd, src.offset_ + | 2049 } else { // Offset > 16 bits, use multiple instructions to store. |
2025 Register::kMantissaOffset); | 2050 LoadRegPlusOffsetToAt(src); |
2026 FPURegister nextfpreg; | 2051 GenInstrImmediate(SWC1, at, fd, Register::kMantissaOffset); |
2027 nextfpreg.setcode(fd.code() + 1); | 2052 mfhc1(t8, fd); |
2028 GenInstrImmediate(SWC1, src.rm(), nextfpreg, src.offset_ + | 2053 GenInstrImmediate(SW, at, t8, Register::kExponentOffset); |
2029 Register::kExponentOffset); | 2054 } |
2055 } else { // fp32 mode. | |
2056 if (is_int16(src.offset_) && is_int16(src.offset_ + kPointerSize)) { | |
2057 GenInstrImmediate(SWC1, src.rm(), fd, | |
2058 src.offset_ + Register::kMantissaOffset); | |
2059 FPURegister nextfpreg; | |
2060 nextfpreg.setcode(fd.code() + 1); | |
2061 GenInstrImmediate(SWC1, src.rm(), nextfpreg, | |
2062 src.offset_ + Register::kExponentOffset); | |
2063 } else { // Offset > 16 bits, use multiple instructions to store. | |
2064 LoadRegPlusOffsetToAt(src); | |
2065 GenInstrImmediate(SWC1, at, fd, Register::kMantissaOffset); | |
2066 FPURegister nextfpreg; | |
2067 nextfpreg.setcode(fd.code() + 1); | |
2068 GenInstrImmediate(SWC1, at, nextfpreg, Register::kExponentOffset); | |
2069 } | |
2030 } | 2070 } |
2031 } | 2071 } |
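One asymmetry between the load and store fallbacks is worth noting: in sdc1's fp64 large-offset path the computed address lives in at, so the exponent word travels through a second scratch register, t8 (mfhc1(t8, fd)), whereas ldc1 can reuse at because the address is consumed before the high word is moved. The emitted sequence, sketched as assembly under the same little-endian assumption (word offsets 0 and 4; hi16/lo16 are illustrative shorthand, not real assembler syntax):

    lui   at, hi16(offset)      # materialize the 32-bit offset in at
    ori   at, at, lo16(offset)
    addu  at, at, base          # at = base + offset
    swc1  fd, 0(at)             # store low (mantissa) word
    mfhc1 t8, fd                # high word -> t8; at still holds the address
    sw    t8, 4(at)             # store high (exponent) word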
2032 | 2072 |
2033 | 2073 |
2034 void Assembler::mtc1(Register rt, FPURegister fs) { | 2074 void Assembler::mtc1(Register rt, FPURegister fs) { |
2035 GenInstrRegister(COP1, MTC1, rt, fs, f0); | 2075 GenInstrRegister(COP1, MTC1, rt, fs, f0); |
2036 } | 2076 } |
2037 | 2077 |
2038 | 2078 |
2039 void Assembler::mthc1(Register rt, FPURegister fs) { | 2079 void Assembler::mthc1(Register rt, FPURegister fs) { |
(...skipping 751 matching lines...) | |
2791 void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) { | 2831 void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) { |
2792 // No out-of-line constant pool support. | 2832 // No out-of-line constant pool support. |
2793 DCHECK(!FLAG_enable_ool_constant_pool); | 2833 DCHECK(!FLAG_enable_ool_constant_pool); |
2794 return; | 2834 return; |
2795 } | 2835 } |
2796 | 2836 |
2797 | 2837 |
2798 } } // namespace v8::internal | 2838 } } // namespace v8::internal |
2799 | 2839 |
2800 #endif // V8_TARGET_ARCH_MIPS | 2840 #endif // V8_TARGET_ARCH_MIPS |