OLD | NEW |
1 // Copyright (c) 1994-2006 Sun Microsystems Inc. | 1 // Copyright (c) 1994-2006 Sun Microsystems Inc. |
2 // All Rights Reserved. | 2 // All Rights Reserved. |
3 // | 3 // |
4 // Redistribution and use in source and binary forms, with or without | 4 // Redistribution and use in source and binary forms, with or without |
5 // modification, are permitted provided that the following conditions are | 5 // modification, are permitted provided that the following conditions are |
6 // met: | 6 // met: |
7 // | 7 // |
8 // - Redistributions of source code must retain the above copyright notice, | 8 // - Redistributions of source code must retain the above copyright notice, |
9 // this list of conditions and the following disclaimer. | 9 // this list of conditions and the following disclaimer. |
10 // | 10 // |
(...skipping 2008 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2019 } else { // Offset > 16 bits, use multiple instructions to load. | 2019 } else { // Offset > 16 bits, use multiple instructions to load. |
2020 LoadRegPlusOffsetToAt(src); | 2020 LoadRegPlusOffsetToAt(src); |
2021 GenInstrImmediate(LWC1, at, fd, 0); | 2021 GenInstrImmediate(LWC1, at, fd, 0); |
2022 } | 2022 } |
2023 } | 2023 } |
2024 | 2024 |
2025 | 2025 |
2026 void Assembler::ldc1(FPURegister fd, const MemOperand& src) { | 2026 void Assembler::ldc1(FPURegister fd, const MemOperand& src) { |
2027 // Workaround for non-8-byte alignment of HeapNumber, convert 64-bit | 2027 // Workaround for non-8-byte alignment of HeapNumber, convert 64-bit |
2028 // load to two 32-bit loads. | 2028 // load to two 32-bit loads. |
| 2029 DCHECK(!src.rm().is(at)); |
2029 if (IsFp64Mode()) { | 2030 if (IsFp64Mode()) { |
2030 if (is_int16(src.offset_) && is_int16(src.offset_ + kIntSize)) { | 2031 if (is_int16(src.offset_) && is_int16(src.offset_ + kIntSize)) { |
2031 GenInstrImmediate(LWC1, src.rm(), fd, | 2032 GenInstrImmediate(LWC1, src.rm(), fd, |
2032 src.offset_ + Register::kMantissaOffset); | 2033 src.offset_ + Register::kMantissaOffset); |
2033 GenInstrImmediate(LW, src.rm(), at, | 2034 GenInstrImmediate(LW, src.rm(), at, |
2034 src.offset_ + Register::kExponentOffset); | 2035 src.offset_ + Register::kExponentOffset); |
2035 mthc1(at, fd); | 2036 mthc1(at, fd); |
2036 } else { // Offset > 16 bits, use multiple instructions to load. | 2037 } else { // Offset > 16 bits, use multiple instructions to load. |
2037 LoadRegPlusOffsetToAt(src); | 2038 LoadRegPlusOffsetToAt(src); |
2038 GenInstrImmediate(LWC1, at, fd, Register::kMantissaOffset); | 2039 GenInstrImmediate(LWC1, at, fd, Register::kMantissaOffset); |
(...skipping 25 matching lines...) Expand all Loading... |
2064 } else { // Offset > 16 bits, use multiple instructions to store. | 2065 } else { // Offset > 16 bits, use multiple instructions to store. |
2065 LoadRegPlusOffsetToAt(src); | 2066 LoadRegPlusOffsetToAt(src); |
2066 GenInstrImmediate(SWC1, at, fd, 0); | 2067 GenInstrImmediate(SWC1, at, fd, 0); |
2067 } | 2068 } |
2068 } | 2069 } |
2069 | 2070 |
2070 | 2071 |
2071 void Assembler::sdc1(FPURegister fd, const MemOperand& src) { | 2072 void Assembler::sdc1(FPURegister fd, const MemOperand& src) { |
2072 // Workaround for non-8-byte alignment of HeapNumber, convert 64-bit | 2073 // Workaround for non-8-byte alignment of HeapNumber, convert 64-bit |
2073 // store to two 32-bit stores. | 2074 // store to two 32-bit stores. |
| 2075 DCHECK(!src.rm().is(at)); |
| 2076 DCHECK(!src.rm().is(t8)); |
2074 if (IsFp64Mode()) { | 2077 if (IsFp64Mode()) { |
2075 if (is_int16(src.offset_) && is_int16(src.offset_ + kIntSize)) { | 2078 if (is_int16(src.offset_) && is_int16(src.offset_ + kIntSize)) { |
2076 GenInstrImmediate(SWC1, src.rm(), fd, | 2079 GenInstrImmediate(SWC1, src.rm(), fd, |
2077 src.offset_ + Register::kMantissaOffset); | 2080 src.offset_ + Register::kMantissaOffset); |
2078 mfhc1(at, fd); | 2081 mfhc1(at, fd); |
2079 GenInstrImmediate(SW, src.rm(), at, | 2082 GenInstrImmediate(SW, src.rm(), at, |
2080 src.offset_ + Register::kExponentOffset); | 2083 src.offset_ + Register::kExponentOffset); |
2081 } else { // Offset > 16 bits, use multiple instructions to store. | 2084 } else { // Offset > 16 bits, use multiple instructions to store. |
2082 LoadRegPlusOffsetToAt(src); | 2085 LoadRegPlusOffsetToAt(src); |
2083 GenInstrImmediate(SWC1, at, fd, Register::kMantissaOffset); | 2086 GenInstrImmediate(SWC1, at, fd, Register::kMantissaOffset); |
(...skipping 1009 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
3093 if (patched) { | 3096 if (patched) { |
3094 CpuFeatures::FlushICache(pc + 2, sizeof(Address)); | 3097 CpuFeatures::FlushICache(pc + 2, sizeof(Address)); |
3095 } | 3098 } |
3096 } | 3099 } |
3097 | 3100 |
3098 | 3101 |
3099 } // namespace internal | 3102 } // namespace internal |
3100 } // namespace v8 | 3103 } // namespace v8 |
3101 | 3104 |
3102 #endif // V8_TARGET_ARCH_MIPS | 3105 #endif // V8_TARGET_ARCH_MIPS |
OLD | NEW |