OLD | NEW |
1 // Copyright (c) 1994-2006 Sun Microsystems Inc. | 1 // Copyright (c) 1994-2006 Sun Microsystems Inc. |
2 // All Rights Reserved. | 2 // All Rights Reserved. |
3 // | 3 // |
4 // Redistribution and use in source and binary forms, with or without | 4 // Redistribution and use in source and binary forms, with or without |
5 // modification, are permitted provided that the following conditions are | 5 // modification, are permitted provided that the following conditions are |
6 // met: | 6 // met: |
7 // | 7 // |
8 // - Redistributions of source code must retain the above copyright notice, | 8 // - Redistributions of source code must retain the above copyright notice, |
9 // this list of conditions and the following disclaimer. | 9 // this list of conditions and the following disclaimer. |
10 // | 10 // |
(...skipping 1922 matching lines...)
1933 emit(instr); | 1933 emit(instr); |
1934 } | 1934 } |
1935 | 1935 |
1936 | 1936 |
1937 // ------------Memory-instructions------------- | 1937 // ------------Memory-instructions------------- |
1938 | 1938 |
1939 // Helper for base-reg + offset, when offset is larger than int16. | 1939 // Helper for base-reg + offset, when offset is larger than int16. |
1940 void Assembler::LoadRegPlusOffsetToAt(const MemOperand& src) { | 1940 void Assembler::LoadRegPlusOffsetToAt(const MemOperand& src) { |
1941 DCHECK(!src.rm().is(at)); | 1941 DCHECK(!src.rm().is(at)); |
1942 DCHECK(is_int32(src.offset_)); | 1942 DCHECK(is_int32(src.offset_)); |
1943 daddiu(at, zero_reg, (src.offset_ >> kLuiShift) & kImm16Mask); | 1943 lui(at, (src.offset_ >> kLuiShift) & kImm16Mask); |
1944 dsll(at, at, kLuiShift); | |
1945 ori(at, at, src.offset_ & kImm16Mask); // Load 32-bit offset. | 1944 ori(at, at, src.offset_ & kImm16Mask); // Load 32-bit offset. |
1946 daddu(at, at, src.rm()); // Add base register. | 1945 daddu(at, at, src.rm()); // Add base register. |
1947 } | 1946 } |
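
A minimal standalone sketch (not part of this patch) of why the single lui above can replace the removed daddiu + dsll pair: under the usual MIPS64 semantics both leave the upper 16-bit half in bits 31..16 of AT, sign-extended to 64 bits. The helper names below are illustrative only.

// Sketch: the removed two-instruction sequence vs. the new single lui.
#include <cassert>
#include <cstdint>

int64_t DaddiuDsll(uint16_t hi) {           // daddiu at, zero_reg, hi; dsll at, at, 16
  int64_t at = static_cast<int16_t>(hi);    // daddiu sign-extends its 16-bit immediate
  return at * 0x10000;                      // dsll shifts the 64-bit value left by 16
}

int64_t Lui(uint16_t hi) {                  // lui at, hi
  // lui places hi in bits 31..16 and sign-extends the 32-bit result to 64 bits.
  return static_cast<int32_t>(static_cast<uint32_t>(hi) << 16);
}

int main() {
  for (uint32_t hi = 0; hi <= 0xFFFF; ++hi) {
    assert(DaddiuDsll(static_cast<uint16_t>(hi)) == Lui(static_cast<uint16_t>(hi)));
  }
  return 0;
}
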
1948 | 1947 |
| 1948 // Helper for base-reg + upper part of offset, when the offset is larger |
| 1949 // than int16. Loads the higher part of the offset into the AT register |
| 1950 // and returns the lower part to be used as the offset in a load/store |
| 1951 // instruction. |
| 1952 int32_t Assembler::LoadRegPlusUpperOffsetPartToAt(const MemOperand& src) { |
| 1953 DCHECK(!src.rm().is(at)); |
| 1954 DCHECK(is_int32(src.offset_)); |
| 1955 int32_t hi = (src.offset_ >> kLuiShift) & kImm16Mask; |
| 1956 // If the highest bit of the lower part of the offset is 1, this would make |
| 1957 // the offset in the load/store instruction negative. We need to compensate |
| 1958 // for this by adding 1 to the upper part of the offset. |
| 1959 if (src.offset_ & kNegOffset) { |
| 1960 if ((hi & kNegOffset) != ((hi + 1) & kNegOffset)) { |
| 1961 LoadRegPlusOffsetToAt(src); |
| 1962 return 0; |
| 1963 } |
| 1964 |
| 1965 hi += 1; |
| 1966 } |
| 1967 |
| 1968 lui(at, hi); |
| 1969 daddu(at, at, src.rm()); |
| 1970 return (src.offset_ & kImm16Mask); |
| 1971 } |
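
A minimal standalone sketch (not part of this patch) of the offset split performed by LoadRegPlusUpperOffsetPartToAt: because the 16-bit immediate of a MIPS load/store is sign-extended, the upper part is incremented when bit 15 of the lower part is set, and the one corner case where that increment would flip the sign bit of the upper part falls back to materializing the full offset. The SplitOffset name and constants below are illustrative only.

// Sketch: split a 32-bit offset into (hi, lo) so that
// (hi << 16) + sign_extend16(lo) == offset, matching the helper above.
#include <cassert>
#include <cstdint>

bool SplitOffset(int32_t offset, int64_t* hi, int32_t* lo) {
  *hi = (offset >> 16) & 0xFFFF;
  *lo = offset & 0xFFFF;
  if (*lo & 0x8000) {                        // lo will be sign-extended negative...
    if ((*hi & 0x8000) != ((*hi + 1) & 0x8000)) {
      return false;                          // hi == 0x7FFF: fall back to the full-offset path.
    }
    *hi += 1;                                // ...so compensate in the upper part.
  }
  return true;
}

int main() {
  int64_t hi;
  int32_t lo;
  assert(SplitOffset(0x12344000, &hi, &lo));
  assert((hi << 16) + static_cast<int16_t>(lo) == 0x12344000);
  assert(SplitOffset(0x12348000, &hi, &lo));
  assert((hi << 16) + static_cast<int16_t>(lo) == 0x12348000);
  assert(!SplitOffset(0x7FFF8000, &hi, &lo));  // corner case: slow path instead.
  return 0;
}
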
1949 | 1972 |
1950 void Assembler::lb(Register rd, const MemOperand& rs) { | 1973 void Assembler::lb(Register rd, const MemOperand& rs) { |
1951 if (is_int16(rs.offset_)) { | 1974 if (is_int16(rs.offset_)) { |
1952 GenInstrImmediate(LB, rs.rm(), rd, rs.offset_); | 1975 GenInstrImmediate(LB, rs.rm(), rd, rs.offset_); |
1953 } else { // Offset > 16 bits, use multiple instructions to load. | 1976 } else { // Offset > 16 bits, use multiple instructions to load. |
1954 LoadRegPlusOffsetToAt(rs); | 1977 int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs); |
1955 GenInstrImmediate(LB, at, rd, 0); // Equiv to lb(rd, MemOperand(at, 0)); | 1978 GenInstrImmediate(LB, at, rd, off16); |
1956 } | 1979 } |
1957 } | 1980 } |
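
A minimal standalone sketch (not part of this patch) of what the change buys at a call site such as lb(rd, MemOperand(base, 0x12348000)): the old path built the whole offset in AT (lui/ori/daddu plus a zero-offset load, four instructions), while the new path folds the lower half into the load's sign-extended immediate (lui/daddu plus the load, three instructions). Register values are modeled as plain integers and the constants are illustrative only.

// Sketch: both emitted sequences compute the same effective address.
#include <cassert>
#include <cstdint>

int main() {
  const int64_t base = 0x0000004000000000;   // value of the base register
  const int32_t offset = 0x12348000;         // offset too large for int16

  // Old sequence: lui at, 0x1234; ori at, at, 0x8000; daddu at, at, base; lb rd, 0(at)
  int64_t at_old = (0x1234LL << 16) | 0x8000;
  int64_t addr_old = base + at_old + 0;

  // New sequence: lui at, 0x1235; daddu at, at, base; lb rd, -0x8000(at)
  int64_t at_new = 0x1235LL << 16;
  int64_t addr_new = base + at_new - 0x8000;  // the immediate 0x8000 sign-extends to -0x8000

  assert(addr_old == addr_new);
  assert(addr_old == base + offset);
  return 0;
}
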
1958 | 1981 |
1959 | 1982 |
1960 void Assembler::lbu(Register rd, const MemOperand& rs) { | 1983 void Assembler::lbu(Register rd, const MemOperand& rs) { |
1961 if (is_int16(rs.offset_)) { | 1984 if (is_int16(rs.offset_)) { |
1962 GenInstrImmediate(LBU, rs.rm(), rd, rs.offset_); | 1985 GenInstrImmediate(LBU, rs.rm(), rd, rs.offset_); |
1963 } else { // Offset > 16 bits, use multiple instructions to load. | 1986 } else { // Offset > 16 bits, use multiple instructions to load. |
1964 LoadRegPlusOffsetToAt(rs); | 1987 int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs); |
1965 GenInstrImmediate(LBU, at, rd, 0); // Equiv to lbu(rd, MemOperand(at, 0)); | 1988 GenInstrImmediate(LBU, at, rd, off16); |
1966 } | 1989 } |
1967 } | 1990 } |
1968 | 1991 |
1969 | 1992 |
1970 void Assembler::lh(Register rd, const MemOperand& rs) { | 1993 void Assembler::lh(Register rd, const MemOperand& rs) { |
1971 if (is_int16(rs.offset_)) { | 1994 if (is_int16(rs.offset_)) { |
1972 GenInstrImmediate(LH, rs.rm(), rd, rs.offset_); | 1995 GenInstrImmediate(LH, rs.rm(), rd, rs.offset_); |
1973 } else { // Offset > 16 bits, use multiple instructions to load. | 1996 } else { // Offset > 16 bits, use multiple instructions to load. |
1974 LoadRegPlusOffsetToAt(rs); | 1997 int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs); |
1975 GenInstrImmediate(LH, at, rd, 0); // Equiv to lh(rd, MemOperand(at, 0)); | 1998 GenInstrImmediate(LH, at, rd, off16); |
1976 } | 1999 } |
1977 } | 2000 } |
1978 | 2001 |
1979 | 2002 |
1980 void Assembler::lhu(Register rd, const MemOperand& rs) { | 2003 void Assembler::lhu(Register rd, const MemOperand& rs) { |
1981 if (is_int16(rs.offset_)) { | 2004 if (is_int16(rs.offset_)) { |
1982 GenInstrImmediate(LHU, rs.rm(), rd, rs.offset_); | 2005 GenInstrImmediate(LHU, rs.rm(), rd, rs.offset_); |
1983 } else { // Offset > 16 bits, use multiple instructions to load. | 2006 } else { // Offset > 16 bits, use multiple instructions to load. |
1984 LoadRegPlusOffsetToAt(rs); | 2007 int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs); |
1985 GenInstrImmediate(LHU, at, rd, 0); // Equiv to lhu(rd, MemOperand(at, 0)); | 2008 GenInstrImmediate(LHU, at, rd, off16); |
1986 } | 2009 } |
1987 } | 2010 } |
1988 | 2011 |
1989 | 2012 |
1990 void Assembler::lw(Register rd, const MemOperand& rs) { | 2013 void Assembler::lw(Register rd, const MemOperand& rs) { |
1991 if (is_int16(rs.offset_)) { | 2014 if (is_int16(rs.offset_)) { |
1992 GenInstrImmediate(LW, rs.rm(), rd, rs.offset_); | 2015 GenInstrImmediate(LW, rs.rm(), rd, rs.offset_); |
1993 } else { // Offset > 16 bits, use multiple instructions to load. | 2016 } else { // Offset > 16 bits, use multiple instructions to load. |
1994 LoadRegPlusOffsetToAt(rs); | 2017 int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs); |
1995 GenInstrImmediate(LW, at, rd, 0); // Equiv to lw(rd, MemOperand(at, 0)); | 2018 GenInstrImmediate(LW, at, rd, off16); |
1996 } | 2019 } |
1997 } | 2020 } |
1998 | 2021 |
1999 | 2022 |
2000 void Assembler::lwu(Register rd, const MemOperand& rs) { | 2023 void Assembler::lwu(Register rd, const MemOperand& rs) { |
2001 if (is_int16(rs.offset_)) { | 2024 if (is_int16(rs.offset_)) { |
2002 GenInstrImmediate(LWU, rs.rm(), rd, rs.offset_); | 2025 GenInstrImmediate(LWU, rs.rm(), rd, rs.offset_); |
2003 } else { // Offset > 16 bits, use multiple instructions to load. | 2026 } else { // Offset > 16 bits, use multiple instructions to load. |
2004 LoadRegPlusOffsetToAt(rs); | 2027 int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs); |
2005 GenInstrImmediate(LWU, at, rd, 0); // Equiv to lwu(rd, MemOperand(at, 0)); | 2028 GenInstrImmediate(LWU, at, rd, off16); |
2006 } | 2029 } |
2007 } | 2030 } |
2008 | 2031 |
2009 | 2032 |
2010 void Assembler::lwl(Register rd, const MemOperand& rs) { | 2033 void Assembler::lwl(Register rd, const MemOperand& rs) { |
2011 DCHECK(is_int16(rs.offset_)); | 2034 DCHECK(is_int16(rs.offset_)); |
2012 DCHECK(kArchVariant == kMips64r2); | 2035 DCHECK(kArchVariant == kMips64r2); |
2013 GenInstrImmediate(LWL, rs.rm(), rd, rs.offset_); | 2036 GenInstrImmediate(LWL, rs.rm(), rd, rs.offset_); |
2014 } | 2037 } |
2015 | 2038 |
2016 | 2039 |
2017 void Assembler::lwr(Register rd, const MemOperand& rs) { | 2040 void Assembler::lwr(Register rd, const MemOperand& rs) { |
2018 DCHECK(is_int16(rs.offset_)); | 2041 DCHECK(is_int16(rs.offset_)); |
2019 DCHECK(kArchVariant == kMips64r2); | 2042 DCHECK(kArchVariant == kMips64r2); |
2020 GenInstrImmediate(LWR, rs.rm(), rd, rs.offset_); | 2043 GenInstrImmediate(LWR, rs.rm(), rd, rs.offset_); |
2021 } | 2044 } |
2022 | 2045 |
2023 | 2046 |
2024 void Assembler::sb(Register rd, const MemOperand& rs) { | 2047 void Assembler::sb(Register rd, const MemOperand& rs) { |
2025 if (is_int16(rs.offset_)) { | 2048 if (is_int16(rs.offset_)) { |
2026 GenInstrImmediate(SB, rs.rm(), rd, rs.offset_); | 2049 GenInstrImmediate(SB, rs.rm(), rd, rs.offset_); |
2027 } else { // Offset > 16 bits, use multiple instructions to store. | 2050 } else { // Offset > 16 bits, use multiple instructions to store. |
2028 LoadRegPlusOffsetToAt(rs); | 2051 int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs); |
2029 GenInstrImmediate(SB, at, rd, 0); // Equiv to sb(rd, MemOperand(at, 0)); | 2052 GenInstrImmediate(SB, at, rd, off16); |
2030 } | 2053 } |
2031 } | 2054 } |
2032 | 2055 |
2033 | 2056 |
2034 void Assembler::sh(Register rd, const MemOperand& rs) { | 2057 void Assembler::sh(Register rd, const MemOperand& rs) { |
2035 if (is_int16(rs.offset_)) { | 2058 if (is_int16(rs.offset_)) { |
2036 GenInstrImmediate(SH, rs.rm(), rd, rs.offset_); | 2059 GenInstrImmediate(SH, rs.rm(), rd, rs.offset_); |
2037 } else { // Offset > 16 bits, use multiple instructions to store. | 2060 } else { // Offset > 16 bits, use multiple instructions to store. |
2038 LoadRegPlusOffsetToAt(rs); | 2061 int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs); |
2039 GenInstrImmediate(SH, at, rd, 0); // Equiv to sh(rd, MemOperand(at, 0)); | 2062 GenInstrImmediate(SH, at, rd, off16); |
2040 } | 2063 } |
2041 } | 2064 } |
2042 | 2065 |
2043 | 2066 |
2044 void Assembler::sw(Register rd, const MemOperand& rs) { | 2067 void Assembler::sw(Register rd, const MemOperand& rs) { |
2045 if (is_int16(rs.offset_)) { | 2068 if (is_int16(rs.offset_)) { |
2046 GenInstrImmediate(SW, rs.rm(), rd, rs.offset_); | 2069 GenInstrImmediate(SW, rs.rm(), rd, rs.offset_); |
2047 } else { // Offset > 16 bits, use multiple instructions to store. | 2070 } else { // Offset > 16 bits, use multiple instructions to store. |
2048 LoadRegPlusOffsetToAt(rs); | 2071 int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs); |
2049 GenInstrImmediate(SW, at, rd, 0); // Equiv to sw(rd, MemOperand(at, 0)); | 2072 GenInstrImmediate(SW, at, rd, off16); |
2050 } | 2073 } |
2051 } | 2074 } |
2052 | 2075 |
2053 | 2076 |
2054 void Assembler::swl(Register rd, const MemOperand& rs) { | 2077 void Assembler::swl(Register rd, const MemOperand& rs) { |
2055 DCHECK(is_int16(rs.offset_)); | 2078 DCHECK(is_int16(rs.offset_)); |
2056 DCHECK(kArchVariant == kMips64r2); | 2079 DCHECK(kArchVariant == kMips64r2); |
2057 GenInstrImmediate(SWL, rs.rm(), rd, rs.offset_); | 2080 GenInstrImmediate(SWL, rs.rm(), rd, rs.offset_); |
2058 } | 2081 } |
2059 | 2082 |
(...skipping 63 matching lines...)
2123 DCHECK(is_int16(rs.offset_)); | 2146 DCHECK(is_int16(rs.offset_)); |
2124 DCHECK(kArchVariant == kMips64r2); | 2147 DCHECK(kArchVariant == kMips64r2); |
2125 GenInstrImmediate(SDR, rs.rm(), rd, rs.offset_); | 2148 GenInstrImmediate(SDR, rs.rm(), rd, rs.offset_); |
2126 } | 2149 } |
2127 | 2150 |
2128 | 2151 |
2129 void Assembler::ld(Register rd, const MemOperand& rs) { | 2152 void Assembler::ld(Register rd, const MemOperand& rs) { |
2130 if (is_int16(rs.offset_)) { | 2153 if (is_int16(rs.offset_)) { |
2131 GenInstrImmediate(LD, rs.rm(), rd, rs.offset_); | 2154 GenInstrImmediate(LD, rs.rm(), rd, rs.offset_); |
2132 } else { // Offset > 16 bits, use multiple instructions to load. | 2155 } else { // Offset > 16 bits, use multiple instructions to load. |
2133 LoadRegPlusOffsetToAt(rs); | 2156 int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs); |
2134 GenInstrImmediate(LD, at, rd, 0); // Equiv to lw(rd, MemOperand(at, 0)); | 2157 GenInstrImmediate(LD, at, rd, off16); |
2135 } | 2158 } |
2136 } | 2159 } |
2137 | 2160 |
2138 | 2161 |
2139 void Assembler::sd(Register rd, const MemOperand& rs) { | 2162 void Assembler::sd(Register rd, const MemOperand& rs) { |
2140 if (is_int16(rs.offset_)) { | 2163 if (is_int16(rs.offset_)) { |
2141 GenInstrImmediate(SD, rs.rm(), rd, rs.offset_); | 2164 GenInstrImmediate(SD, rs.rm(), rd, rs.offset_); |
2142 } else { // Offset > 16 bits, use multiple instructions to store. | 2165 } else { // Offset > 16 bits, use multiple instructions to store. |
2143 LoadRegPlusOffsetToAt(rs); | 2166 int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs); |
2144 GenInstrImmediate(SD, at, rd, 0); // Equiv to sw(rd, MemOperand(at, 0)); | 2167 GenInstrImmediate(SD, at, rd, off16); |
2145 } | 2168 } |
2146 } | 2169 } |
2147 | 2170 |
2148 | 2171 |
2149 // ---------PC-Relative instructions----------- | 2172 // ---------PC-Relative instructions----------- |
2150 | 2173 |
2151 void Assembler::addiupc(Register rs, int32_t imm19) { | 2174 void Assembler::addiupc(Register rs, int32_t imm19) { |
2152 DCHECK(kArchVariant == kMips64r6); | 2175 DCHECK(kArchVariant == kMips64r6); |
2153 DCHECK(rs.is_valid() && is_int19(imm19)); | 2176 DCHECK(rs.is_valid() && is_int19(imm19)); |
2154 uint32_t imm21 = ADDIUPC << kImm19Bits | (imm19 & kImm19Mask); | 2177 uint32_t imm21 = ADDIUPC << kImm19Bits | (imm19 & kImm19Mask); |
(...skipping 389 matching lines...)
2544 GenInstrRegister(SPECIAL3, zero_reg, rt, rd, SEB, BSHFL); | 2567 GenInstrRegister(SPECIAL3, zero_reg, rt, rd, SEB, BSHFL); |
2545 } | 2568 } |
2546 | 2569 |
2547 // --------Coprocessor-instructions---------------- | 2570 // --------Coprocessor-instructions---------------- |
2548 | 2571 |
2549 // Load, store, move. | 2572 // Load, store, move. |
2550 void Assembler::lwc1(FPURegister fd, const MemOperand& src) { | 2573 void Assembler::lwc1(FPURegister fd, const MemOperand& src) { |
2551 if (is_int16(src.offset_)) { | 2574 if (is_int16(src.offset_)) { |
2552 GenInstrImmediate(LWC1, src.rm(), fd, src.offset_); | 2575 GenInstrImmediate(LWC1, src.rm(), fd, src.offset_); |
2553 } else { // Offset > 16 bits, use multiple instructions to load. | 2576 } else { // Offset > 16 bits, use multiple instructions to load. |
2554 LoadRegPlusOffsetToAt(src); | 2577 int32_t off16 = LoadRegPlusUpperOffsetPartToAt(src); |
2555 GenInstrImmediate(LWC1, at, fd, 0); | 2578 GenInstrImmediate(LWC1, at, fd, off16); |
2556 } | 2579 } |
2557 } | 2580 } |
2558 | 2581 |
2559 | 2582 |
2560 void Assembler::ldc1(FPURegister fd, const MemOperand& src) { | 2583 void Assembler::ldc1(FPURegister fd, const MemOperand& src) { |
2561 if (is_int16(src.offset_)) { | 2584 if (is_int16(src.offset_)) { |
2562 GenInstrImmediate(LDC1, src.rm(), fd, src.offset_); | 2585 GenInstrImmediate(LDC1, src.rm(), fd, src.offset_); |
2563 } else { // Offset > 16 bits, use multiple instructions to load. | 2586 } else { // Offset > 16 bits, use multiple instructions to load. |
2564 LoadRegPlusOffsetToAt(src); | 2587 int32_t off16 = LoadRegPlusUpperOffsetPartToAt(src); |
2565 GenInstrImmediate(LDC1, at, fd, 0); | 2588 GenInstrImmediate(LDC1, at, fd, off16); |
2566 } | 2589 } |
2567 } | 2590 } |
2568 | 2591 |
2569 | 2592 |
2570 void Assembler::swc1(FPURegister fd, const MemOperand& src) { | 2593 void Assembler::swc1(FPURegister fd, const MemOperand& src) { |
2571 if (is_int16(src.offset_)) { | 2594 if (is_int16(src.offset_)) { |
2572 GenInstrImmediate(SWC1, src.rm(), fd, src.offset_); | 2595 GenInstrImmediate(SWC1, src.rm(), fd, src.offset_); |
2573 } else { // Offset > 16 bits, use multiple instructions to load. | 2596 } else { // Offset > 16 bits, use multiple instructions to load. |
2574 LoadRegPlusOffsetToAt(src); | 2597 int32_t off16 = LoadRegPlusUpperOffsetPartToAt(src); |
2575 GenInstrImmediate(SWC1, at, fd, 0); | 2598 GenInstrImmediate(SWC1, at, fd, off16); |
2576 } | 2599 } |
2577 } | 2600 } |
2578 | 2601 |
2579 | 2602 |
2580 void Assembler::sdc1(FPURegister fd, const MemOperand& src) { | 2603 void Assembler::sdc1(FPURegister fd, const MemOperand& src) { |
2581 DCHECK(!src.rm().is(at)); | 2604 DCHECK(!src.rm().is(at)); |
2582 if (is_int16(src.offset_)) { | 2605 if (is_int16(src.offset_)) { |
2583 GenInstrImmediate(SDC1, src.rm(), fd, src.offset_); | 2606 GenInstrImmediate(SDC1, src.rm(), fd, src.offset_); |
2584 } else { // Offset > 16 bits, use multiple instructions to load. | 2607 } else { // Offset > 16 bits, use multiple instructions to load. |
2585 LoadRegPlusOffsetToAt(src); | 2608 int32_t off16 = LoadRegPlusUpperOffsetPartToAt(src); |
2586 GenInstrImmediate(SDC1, at, fd, 0); | 2609 GenInstrImmediate(SDC1, at, fd, off16); |
2587 } | 2610 } |
2588 } | 2611 } |
2589 | 2612 |
2590 | 2613 |
2591 void Assembler::mtc1(Register rt, FPURegister fs) { | 2614 void Assembler::mtc1(Register rt, FPURegister fs) { |
2592 GenInstrRegister(COP1, MTC1, rt, fs, f0); | 2615 GenInstrRegister(COP1, MTC1, rt, fs, f0); |
2593 } | 2616 } |
2594 | 2617 |
2595 | 2618 |
2596 void Assembler::mthc1(Register rt, FPURegister fs) { | 2619 void Assembler::mthc1(Register rt, FPURegister fs) { |
(...skipping 885 matching lines...)
3482 | 3505 |
3483 if (icache_flush_mode != SKIP_ICACHE_FLUSH) { | 3506 if (icache_flush_mode != SKIP_ICACHE_FLUSH) { |
3484 Assembler::FlushICache(isolate, pc, 4 * Assembler::kInstrSize); | 3507 Assembler::FlushICache(isolate, pc, 4 * Assembler::kInstrSize); |
3485 } | 3508 } |
3486 } | 3509 } |
3487 | 3510 |
3488 } // namespace internal | 3511 } // namespace internal |
3489 } // namespace v8 | 3512 } // namespace v8 |
3490 | 3513 |
3491 #endif // V8_TARGET_ARCH_MIPS64 | 3514 #endif // V8_TARGET_ARCH_MIPS64 |