Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright (c) 1994-2006 Sun Microsystems Inc. | 1 // Copyright (c) 1994-2006 Sun Microsystems Inc. |
| 2 // All Rights Reserved. | 2 // All Rights Reserved. |
| 3 // | 3 // |
| 4 // Redistribution and use in source and binary forms, with or without | 4 // Redistribution and use in source and binary forms, with or without |
| 5 // modification, are permitted provided that the following conditions are | 5 // modification, are permitted provided that the following conditions are |
| 6 // met: | 6 // met: |
| 7 // | 7 // |
| 8 // - Redistributions of source code must retain the above copyright notice, | 8 // - Redistributions of source code must retain the above copyright notice, |
| 9 // this list of conditions and the following disclaimer. | 9 // this list of conditions and the following disclaimer. |
| 10 // | 10 // |
| (...skipping 2008 matching lines...) | |
| 2019 } else { // Offset > 16 bits, use multiple instructions to load. | 2019 } else { // Offset > 16 bits, use multiple instructions to load. |
| 2020 LoadRegPlusOffsetToAt(src); | 2020 LoadRegPlusOffsetToAt(src); |
| 2021 GenInstrImmediate(LWC1, at, fd, 0); | 2021 GenInstrImmediate(LWC1, at, fd, 0); |
| 2022 } | 2022 } |
| 2023 } | 2023 } |
| 2024 | 2024 |
| 2025 | 2025 |
| 2026 void Assembler::ldc1(FPURegister fd, const MemOperand& src) { | 2026 void Assembler::ldc1(FPURegister fd, const MemOperand& src) { |
| 2027 // Workaround for non-8-byte alignment of HeapNumber, convert 64-bit | 2027 // Workaround for non-8-byte alignment of HeapNumber, convert 64-bit |
| 2028 // load to two 32-bit loads. | 2028 // load to two 32-bit loads. |
| | 2029 DCHECK(!src.rm().is(at)); |
| | 2030 DCHECK(!src.rm().is(t8)); |

> paul.l... 2015/09/08 15:48:41
> Why check for t8 here? You do not use it in this function.
>
> Djordje.Pesic 2015/09/09 06:41:54
> DCHECK(!src.rm().is(t8)) removed. Macro-asm Leeave

| OLD | NEW |
|---|---|
| 2029 if (IsFp64Mode()) { | 2031 if (IsFp64Mode()) { |
| 2030 if (is_int16(src.offset_) && is_int16(src.offset_ + kIntSize)) { | 2032 if (is_int16(src.offset_) && is_int16(src.offset_ + kIntSize)) { |
| 2031 GenInstrImmediate(LWC1, src.rm(), fd, | 2033 GenInstrImmediate(LWC1, src.rm(), fd, |
| 2032 src.offset_ + Register::kMantissaOffset); | 2034 src.offset_ + Register::kMantissaOffset); |
| 2033 GenInstrImmediate(LW, src.rm(), at, | 2035 GenInstrImmediate(LW, src.rm(), at, |
| 2034 src.offset_ + Register::kExponentOffset); | 2036 src.offset_ + Register::kExponentOffset); |
| 2035 mthc1(at, fd); | 2037 mthc1(at, fd); |
| 2036 } else { // Offset > 16 bits, use multiple instructions to load. | 2038 } else { // Offset > 16 bits, use multiple instructions to load. |
| 2037 LoadRegPlusOffsetToAt(src); | 2039 LoadRegPlusOffsetToAt(src); |
| 2038 GenInstrImmediate(LWC1, at, fd, Register::kMantissaOffset); | 2040 GenInstrImmediate(LWC1, at, fd, Register::kMantissaOffset); |
| (...skipping 25 matching lines...) | |
| 2064 } else { // Offset > 16 bits, use multiple instructions to load. | 2066 } else { // Offset > 16 bits, use multiple instructions to load. |
| 2065 LoadRegPlusOffsetToAt(src); | 2067 LoadRegPlusOffsetToAt(src); |
| 2066 GenInstrImmediate(SWC1, at, fd, 0); | 2068 GenInstrImmediate(SWC1, at, fd, 0); |
| 2067 } | 2069 } |
| 2068 } | 2070 } |
| 2069 | 2071 |
| 2070 | 2072 |
| 2071 void Assembler::sdc1(FPURegister fd, const MemOperand& src) { | 2073 void Assembler::sdc1(FPURegister fd, const MemOperand& src) { |
| 2072 // Workaround for non-8-byte alignment of HeapNumber, convert 64-bit | 2074 // Workaround for non-8-byte alignment of HeapNumber, convert 64-bit |
| 2073 // store to two 32-bit stores. | 2075 // store to two 32-bit stores. |
| | 2076 DCHECK(!src.rm().is(at)); |
| | 2077 DCHECK(!src.rm().is(t8)); |
| 2074 if (IsFp64Mode()) { | 2078 if (IsFp64Mode()) { |
| 2075 if (is_int16(src.offset_) && is_int16(src.offset_ + kIntSize)) { | 2079 if (is_int16(src.offset_) && is_int16(src.offset_ + kIntSize)) { |
| 2076 GenInstrImmediate(SWC1, src.rm(), fd, | 2080 GenInstrImmediate(SWC1, src.rm(), fd, |
| 2077 src.offset_ + Register::kMantissaOffset); | 2081 src.offset_ + Register::kMantissaOffset); |
| 2078 mfhc1(at, fd); | 2082 mfhc1(at, fd); |
| 2079 GenInstrImmediate(SW, src.rm(), at, | 2083 GenInstrImmediate(SW, src.rm(), at, |
| 2080 src.offset_ + Register::kExponentOffset); | 2084 src.offset_ + Register::kExponentOffset); |
| 2081 } else { // Offset > 16 bits, use multiple instructions to load. | 2085 } else { // Offset > 16 bits, use multiple instructions to load. |
| 2082 LoadRegPlusOffsetToAt(src); | 2086 LoadRegPlusOffsetToAt(src); |
| 2083 GenInstrImmediate(SWC1, at, fd, Register::kMantissaOffset); | 2087 GenInstrImmediate(SWC1, at, fd, Register::kMantissaOffset); |
| (...skipping 1009 matching lines...) | |
| 3093 if (patched) { | 3097 if (patched) { |
| 3094 CpuFeatures::FlushICache(pc + 2, sizeof(Address)); | 3098 CpuFeatures::FlushICache(pc + 2, sizeof(Address)); |
| 3095 } | 3099 } |
| 3096 } | 3100 } |
| 3097 | 3101 |
| 3098 | 3102 |
| 3099 } // namespace internal | 3103 } // namespace internal |
| 3100 } // namespace v8 | 3104 } // namespace v8 |
| 3101 | 3105 |
| 3102 #endif // V8_TARGET_ARCH_MIPS | 3106 #endif // V8_TARGET_ARCH_MIPS |
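
The `ldc1`/`sdc1` comments in the diff describe the HeapNumber alignment workaround: the double field is only guaranteed 4-byte alignment, so the 64-bit FPU access is split into two 32-bit accesses at the mantissa-word and exponent-word offsets (`Register::kMantissaOffset` / `Register::kExponentOffset`). The following is a minimal standalone sketch of that word split, not V8 code; the variable names are illustrative only.

```cpp
// Sketch of splitting a 64-bit double into two 32-bit words, the idea behind
// the ldc1/sdc1 workaround above.  On a little-endian MIPS build the low
// ("mantissa") word sits at offset 0 and the high ("exponent") word at
// offset 4; the offsets swap on big-endian.
#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
  double value = 3.141592653589793;

  // Store path: split the 64-bit pattern, as sdc1 does with SWC1 (low word)
  // plus mfhc1/SW (high word) in FP64 mode.
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof bits);
  uint32_t mantissa_word = static_cast<uint32_t>(bits);        // low 32 bits
  uint32_t exponent_word = static_cast<uint32_t>(bits >> 32);  // high 32 bits

  // Load path: reassemble the double, as ldc1 does with LWC1 (low word)
  // plus LW/mthc1 (high word).
  uint64_t reassembled =
      (static_cast<uint64_t>(exponent_word) << 32) | mantissa_word;
  double out;
  std::memcpy(&out, &reassembled, sizeof out);

  std::printf("%.15f\n", out);  // prints the original value
  return 0;
}
```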
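The `// Offset > 16 bits` branches exist because MIPS load/store instructions take a signed 16-bit immediate; a larger offset is first materialized into the scratch register `at` by `LoadRegPlusOffsetToAt`, which is also why the base register must not itself be `at` (the `DCHECK` discussed in the review thread). Below is a small sketch of the range check, assuming the usual signed 16-bit bounds; this `is_int16` is a local illustrative copy, not the V8 declaration.

```cpp
// Sketch of the signed 16-bit offset check that decides between the direct
// immediate form and the scratch-register (at) path in the diff above.
#include <cstdint>
#include <cstdio>

// Illustrative copy of an is_int16()-style predicate.
static bool is_int16(int64_t value) {
  return value >= -32768 && value <= 32767;
}

int main() {
  std::printf("%d\n", is_int16(0x7FFF));   // 1: fits in the immediate field
  std::printf("%d\n", is_int16(0x8000));   // 0: needs the scratch-register path
  std::printf("%d\n", is_int16(-0x8000));  // 1: minimum representable offset
  return 0;
}
```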