OLD | NEW |
1 // Copyright (c) 1994-2006 Sun Microsystems Inc. | 1 // Copyright (c) 1994-2006 Sun Microsystems Inc. |
2 // All Rights Reserved. | 2 // All Rights Reserved. |
3 // | 3 // |
4 // Redistribution and use in source and binary forms, with or without | 4 // Redistribution and use in source and binary forms, with or without |
5 // modification, are permitted provided that the following conditions are | 5 // modification, are permitted provided that the following conditions are |
6 // met: | 6 // met: |
7 // | 7 // |
8 // - Redistributions of source code must retain the above copyright notice, | 8 // - Redistributions of source code must retain the above copyright notice, |
9 // this list of conditions and the following disclaimer. | 9 // this list of conditions and the following disclaimer. |
10 // | 10 // |
(...skipping 154 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
165 s8_fp, | 165 s8_fp, |
166 ra | 166 ra |
167 }; | 167 }; |
168 return kRegisters[num]; | 168 return kRegisters[num]; |
169 } | 169 } |
170 | 170 |
171 | 171 |
172 // ----------------------------------------------------------------------------- | 172 // ----------------------------------------------------------------------------- |
173 // Implementation of RelocInfo. | 173 // Implementation of RelocInfo. |
174 | 174 |
175 const int RelocInfo::kApplyMask = 1 << RelocInfo::INTERNAL_REFERENCE; | 175 const int RelocInfo::kApplyMask = RelocInfo::kCodeTargetMask | |
| 176 1 << RelocInfo::INTERNAL_REFERENCE; |
176 | 177 |
177 | 178 |
178 bool RelocInfo::IsCodedSpecially() { | 179 bool RelocInfo::IsCodedSpecially() { |
179 // The deserializer needs to know whether a pointer is specially coded. Being | 180 // The deserializer needs to know whether a pointer is specially coded. Being |
180 // specially coded on MIPS means that it is a lui/ori instruction, and that is | 181 // specially coded on MIPS means that it is a lui/ori instruction, and that is |
181 // always the case inside code objects. | 182 // always the case inside code objects. |
182 return true; | 183 return true; |
183 } | 184 } |
184 | 185 |
185 | 186 |
(...skipping 353 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
539 } | 540 } |
540 | 541 |
541 | 542 |
542 bool Assembler::IsJ(Instr instr) { | 543 bool Assembler::IsJ(Instr instr) { |
543 uint32_t opcode = GetOpcodeField(instr); | 544 uint32_t opcode = GetOpcodeField(instr); |
544 // Checks if the instruction is a jump. | 545 // Checks if the instruction is a jump. |
545 return opcode == J; | 546 return opcode == J; |
546 } | 547 } |
547 | 548 |
548 | 549 |
| 550 bool Assembler::IsJal(Instr instr) { |
| 551 return GetOpcodeField(instr) == JAL; |
| 552 } |
| 553 |
| 554 bool Assembler::IsJr(Instr instr) { |
| 555 return GetOpcodeField(instr) == SPECIAL && GetFunctionField(instr) == JR; |
| 556 } |
| 557 |
| 558 bool Assembler::IsJalr(Instr instr) { |
| 559 return GetOpcodeField(instr) == SPECIAL && GetFunctionField(instr) == JALR; |
| 560 } |
| 561 |
| 562 |
549 bool Assembler::IsLui(Instr instr) { | 563 bool Assembler::IsLui(Instr instr) { |
550 uint32_t opcode = GetOpcodeField(instr); | 564 uint32_t opcode = GetOpcodeField(instr); |
551 // Checks if the instruction is a load upper immediate. | 565 // Checks if the instruction is a load upper immediate. |
552 return opcode == LUI; | 566 return opcode == LUI; |
553 } | 567 } |
554 | 568 |
555 | 569 |
556 bool Assembler::IsOri(Instr instr) { | 570 bool Assembler::IsOri(Instr instr) { |
557 uint32_t opcode = GetOpcodeField(instr); | 571 uint32_t opcode = GetOpcodeField(instr); |
558 // Checks if the instruction is a load upper immediate. | 572 // Checks if the instruction is an or-immediate (ori). |
(...skipping 373 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
932 int32_t j) { | 946 int32_t j) { |
933 ASSERT(rs.is_valid() && ft.is_valid() && (is_int16(j) || is_uint16(j))); | 947 ASSERT(rs.is_valid() && ft.is_valid() && (is_int16(j) || is_uint16(j))); |
934 ASSERT(CpuFeatures::IsEnabled(FPU)); | 948 ASSERT(CpuFeatures::IsEnabled(FPU)); |
935 Instr instr = opcode | (rs.code() << kRsShift) | (ft.code() << kFtShift) | 949 Instr instr = opcode | (rs.code() << kRsShift) | (ft.code() << kFtShift) |
936 | (j & kImm16Mask); | 950 | (j & kImm16Mask); |
937 emit(instr); | 951 emit(instr); |
938 } | 952 } |
939 | 953 |
940 | 954 |
941 void Assembler::GenInstrJump(Opcode opcode, | 955 void Assembler::GenInstrJump(Opcode opcode, |
942 uint32_t address) { | 956 uint32_t address) { |
943 BlockTrampolinePoolScope block_trampoline_pool(this); | 957 BlockTrampolinePoolScope block_trampoline_pool(this); |
944 ASSERT(is_uint26(address)); | 958 ASSERT(is_uint26(address)); |
945 Instr instr = opcode | address; | 959 Instr instr = opcode | address; |
946 emit(instr); | 960 emit(instr); |
947 BlockTrampolinePoolFor(1); // For associated delay slot. | 961 BlockTrampolinePoolFor(1); // For associated delay slot. |
948 } | 962 } |
949 | 963 |
950 | 964 |
951 // Returns the next free trampoline entry. | 965 // Returns the next free trampoline entry. |
952 int32_t Assembler::get_trampoline_entry(int32_t pos) { | 966 int32_t Assembler::get_trampoline_entry(int32_t pos) { |
(...skipping 152 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1105 | 1119 |
1106 | 1120 |
1107 void Assembler::bne(Register rs, Register rt, int16_t offset) { | 1121 void Assembler::bne(Register rs, Register rt, int16_t offset) { |
1108 BlockTrampolinePoolScope block_trampoline_pool(this); | 1122 BlockTrampolinePoolScope block_trampoline_pool(this); |
1109 GenInstrImmediate(BNE, rs, rt, offset); | 1123 GenInstrImmediate(BNE, rs, rt, offset); |
1110 BlockTrampolinePoolFor(1); // For associated delay slot. | 1124 BlockTrampolinePoolFor(1); // For associated delay slot. |
1111 } | 1125 } |
1112 | 1126 |
1113 | 1127 |
1114 void Assembler::j(int32_t target) { | 1128 void Assembler::j(int32_t target) { |
1115 ASSERT(is_uint28(target) && ((target & 3) == 0)); | 1129 #ifdef DEBUG |
| 1130 // Get pc of delay slot. |
| 1131 uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize); |
| 1132 bool in_range = ((uint32_t)(ipc^target) >> (kImm26Bits+kImmFieldShift)) == 0; |
| 1133 ASSERT(in_range && ((target & 3) == 0)); |
| 1134 #endif |
1116 GenInstrJump(J, target >> 2); | 1135 GenInstrJump(J, target >> 2); |
1117 } | 1136 } |
1118 | 1137 |
1119 | 1138 |
1120 void Assembler::jr(Register rs) { | 1139 void Assembler::jr(Register rs) { |
1121 BlockTrampolinePoolScope block_trampoline_pool(this); | 1140 BlockTrampolinePoolScope block_trampoline_pool(this); |
1122 if (rs.is(ra)) { | 1141 if (rs.is(ra)) { |
1123 positions_recorder()->WriteRecordedPositions(); | 1142 positions_recorder()->WriteRecordedPositions(); |
1124 } | 1143 } |
1125 GenInstrRegister(SPECIAL, rs, zero_reg, zero_reg, 0, JR); | 1144 GenInstrRegister(SPECIAL, rs, zero_reg, zero_reg, 0, JR); |
1126 BlockTrampolinePoolFor(1); // For associated delay slot. | 1145 BlockTrampolinePoolFor(1); // For associated delay slot. |
1127 } | 1146 } |
1128 | 1147 |
1129 | 1148 |
1130 void Assembler::jal(int32_t target) { | 1149 void Assembler::jal(int32_t target) { |
| 1150 #ifdef DEBUG |
| 1151 // Get pc of delay slot. |
| 1152 uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize); |
| 1153 bool in_range = ((uint32_t)(ipc^target) >> (kImm26Bits+kImmFieldShift)) == 0; |
| 1154 ASSERT(in_range && ((target & 3) == 0)); |
| 1155 #endif |
1131 positions_recorder()->WriteRecordedPositions(); | 1156 positions_recorder()->WriteRecordedPositions(); |
1132 ASSERT(is_uint28(target) && ((target & 3) == 0)); | |
1133 GenInstrJump(JAL, target >> 2); | 1157 GenInstrJump(JAL, target >> 2); |
1134 } | 1158 } |
1135 | 1159 |
1136 | 1160 |
1137 void Assembler::jalr(Register rs, Register rd) { | 1161 void Assembler::jalr(Register rs, Register rd) { |
1138 BlockTrampolinePoolScope block_trampoline_pool(this); | 1162 BlockTrampolinePoolScope block_trampoline_pool(this); |
1139 positions_recorder()->WriteRecordedPositions(); | 1163 positions_recorder()->WriteRecordedPositions(); |
1140 GenInstrRegister(SPECIAL, rs, zero_reg, rd, 0, JALR); | 1164 GenInstrRegister(SPECIAL, rs, zero_reg, rd, 0, JALR); |
1141 BlockTrampolinePoolFor(1); // For associated delay slot. | 1165 BlockTrampolinePoolFor(1); // For associated delay slot. |
1142 } | 1166 } |
1143 | 1167 |
1144 | 1168 |
| 1169 void Assembler::j_or_jr(int32_t target, Register rs) { |
| 1170 // Get pc of delay slot. |
| 1171 uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize); |
| 1172 bool in_range = ((uint32_t)(ipc^target) >> (kImm26Bits+kImmFieldShift)) == 0; |
| 1173 |
| 1174 if (in_range) { |
| 1175 j(target); |
| 1176 } else { |
| 1177 jr(t9); |
| 1178 } |
| 1179 } |
| 1180 |
| 1181 |
| 1182 void Assembler::jal_or_jalr(int32_t target, Register rs) { |
| 1183 // Get pc of delay slot. |
| 1184 uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize); |
| 1185 bool in_range = ((uint32_t)(ipc^target) >> (kImm26Bits+kImmFieldShift)) == 0; |
| 1186 |
| 1187 if (in_range) { |
| 1188 jal(target); |
| 1189 } else { |
| 1190 jalr(t9); |
| 1191 } |
| 1192 } |
| 1193 |
| 1194 |
1145 //-------Data-processing-instructions--------- | 1195 //-------Data-processing-instructions--------- |
1146 | 1196 |
1147 // Arithmetic. | 1197 // Arithmetic. |
1148 | 1198 |
1149 void Assembler::addu(Register rd, Register rs, Register rt) { | 1199 void Assembler::addu(Register rd, Register rs, Register rt) { |
1150 GenInstrRegister(SPECIAL, rs, rt, rd, 0, ADDU); | 1200 GenInstrRegister(SPECIAL, rs, rt, rd, 0, ADDU); |
1151 } | 1201 } |
1152 | 1202 |
1153 | 1203 |
1154 void Assembler::addiu(Register rd, Register rs, int32_t j) { | 1204 void Assembler::addiu(Register rd, Register rs, int32_t j) { |
(...skipping 452 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1607 | 1657 |
1608 void Assembler::ctc1(Register rt, FPUControlRegister fs) { | 1658 void Assembler::ctc1(Register rt, FPUControlRegister fs) { |
1609 GenInstrRegister(COP1, CTC1, rt, fs); | 1659 GenInstrRegister(COP1, CTC1, rt, fs); |
1610 } | 1660 } |
1611 | 1661 |
1612 | 1662 |
1613 void Assembler::cfc1(Register rt, FPUControlRegister fs) { | 1663 void Assembler::cfc1(Register rt, FPUControlRegister fs) { |
1614 GenInstrRegister(COP1, CFC1, rt, fs); | 1664 GenInstrRegister(COP1, CFC1, rt, fs); |
1615 } | 1665 } |
1616 | 1666 |
| 1667 void Assembler::DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) { |
| 1668 uint64_t i; |
| 1669 memcpy(&i, &d, 8); |
| 1670 |
| 1671 *lo = i & 0xffffffff; |
| 1672 *hi = i >> 32; |
| 1673 } |
1617 | 1674 |
1618 // Arithmetic. | 1675 // Arithmetic. |
1619 | 1676 |
1620 void Assembler::add_d(FPURegister fd, FPURegister fs, FPURegister ft) { | 1677 void Assembler::add_d(FPURegister fd, FPURegister fs, FPURegister ft) { |
1621 GenInstrRegister(COP1, D, ft, fs, fd, ADD_D); | 1678 GenInstrRegister(COP1, D, ft, fs, fd, ADD_D); |
1622 } | 1679 } |
1623 | 1680 |
1624 | 1681 |
1625 void Assembler::sub_d(FPURegister fd, FPURegister fs, FPURegister ft) { | 1682 void Assembler::sub_d(FPURegister fd, FPURegister fs, FPURegister ft) { |
1626 GenInstrRegister(COP1, D, ft, fs, fd, SUB_D); | 1683 GenInstrRegister(COP1, D, ft, fs, fd, SUB_D); |
(...skipping 338 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1965 if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::DEBUG_BREAK_SLOT) { | 2022 if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::DEBUG_BREAK_SLOT) { |
1966 // Adjust code for new modes. | 2023 // Adjust code for new modes. |
1967 ASSERT(RelocInfo::IsDebugBreakSlot(rmode) | 2024 ASSERT(RelocInfo::IsDebugBreakSlot(rmode) |
1968 || RelocInfo::IsJSReturn(rmode) | 2025 || RelocInfo::IsJSReturn(rmode) |
1969 || RelocInfo::IsComment(rmode) | 2026 || RelocInfo::IsComment(rmode) |
1970 || RelocInfo::IsPosition(rmode)); | 2027 || RelocInfo::IsPosition(rmode)); |
1971 // These modes do not need an entry in the constant pool. | 2028 // These modes do not need an entry in the constant pool. |
1972 } | 2029 } |
1973 if (rinfo.rmode() != RelocInfo::NONE) { | 2030 if (rinfo.rmode() != RelocInfo::NONE) { |
1974 // Don't record external references unless the heap will be serialized. | 2031 // Don't record external references unless the heap will be serialized. |
1975 if (rmode == RelocInfo::EXTERNAL_REFERENCE && | 2032 if (rmode == RelocInfo::EXTERNAL_REFERENCE) { |
1976 !Serializer::enabled() && | 2033 #ifdef DEBUG |
1977 !FLAG_debug_code) { | 2034 if (!Serializer::enabled()) { |
1978 return; | 2035 Serializer::TooLateToEnableNow(); |
| 2036 } |
| 2037 #endif |
| 2038 if (!Serializer::enabled() && !emit_debug_code()) { |
| 2039 return; |
| 2040 } |
1979 } | 2041 } |
1980 ASSERT(buffer_space() >= kMaxRelocSize); // Too late to grow buffer here. | 2042 ASSERT(buffer_space() >= kMaxRelocSize); // Too late to grow buffer here. |
1981 if (rmode == RelocInfo::CODE_TARGET_WITH_ID) { | 2043 if (rmode == RelocInfo::CODE_TARGET_WITH_ID) { |
1982 RelocInfo reloc_info_with_ast_id(pc_, rmode, RecordedAstId()); | 2044 RelocInfo reloc_info_with_ast_id(pc_, rmode, RecordedAstId()); |
1983 ClearRecordedAstId(); | 2045 ClearRecordedAstId(); |
1984 reloc_info_writer.Write(&reloc_info_with_ast_id); | 2046 reloc_info_writer.Write(&reloc_info_with_ast_id); |
1985 } else { | 2047 } else { |
1986 reloc_info_writer.Write(&rinfo); | 2048 reloc_info_writer.Write(&rinfo); |
1987 } | 2049 } |
1988 } | 2050 } |
(...skipping 74 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2063 return reinterpret_cast<Address>( | 2125 return reinterpret_cast<Address>( |
2064 (GetImmediate16(instr1) << 16) | GetImmediate16(instr2)); | 2126 (GetImmediate16(instr1) << 16) | GetImmediate16(instr2)); |
2065 } | 2127 } |
2066 | 2128 |
2067 // We should never get here, force a bad address if we do. | 2129 // We should never get here, force a bad address if we do. |
2068 UNREACHABLE(); | 2130 UNREACHABLE(); |
2069 return (Address)0x0; | 2131 return (Address)0x0; |
2070 } | 2132 } |
2071 | 2133 |
2072 | 2134 |
| 2135 // On Mips, a target address is stored in a lui/ori instruction pair, each |
| 2136 // of which loads 16 bits of the 32-bit address to a register. |
| 2137 // Patching the address must replace both instr, and flush the i-cache. |
| 2138 // |
| 2139 // There is an optimization below, which emits a nop when the address |
| 2140 // fits in just 16 bits. This is unlikely to help, and should be benchmarked, |
| 2141 // and possibly removed. |
2073 void Assembler::set_target_address_at(Address pc, Address target) { | 2142 void Assembler::set_target_address_at(Address pc, Address target) { |
2074 // On MIPS we patch the address into lui/ori instruction pair. | |
2075 | |
2076 // First check we have an li (lui/ori pair). | |
2077 Instr instr2 = instr_at(pc + kInstrSize); | 2143 Instr instr2 = instr_at(pc + kInstrSize); |
2078 #ifdef DEBUG | |
2079 Instr instr1 = instr_at(pc); | |
2080 | |
2081 // Check we have indeed the result from a li with MustUseReg true. | |
2082 CHECK((GetOpcodeField(instr1) == LUI && GetOpcodeField(instr2) == ORI)); | |
2083 #endif | |
2084 | |
2085 uint32_t rt_code = GetRtField(instr2); | 2144 uint32_t rt_code = GetRtField(instr2); |
2086 uint32_t* p = reinterpret_cast<uint32_t*>(pc); | 2145 uint32_t* p = reinterpret_cast<uint32_t*>(pc); |
2087 uint32_t itarget = reinterpret_cast<uint32_t>(target); | 2146 uint32_t itarget = reinterpret_cast<uint32_t>(target); |
2088 | 2147 |
2089 // lui rt, high-16. | 2148 #ifdef DEBUG |
2090 // ori rt rt, low-16. | 2149 // Check we have the result from a li macro-instruction, using instr pair. |
| 2150 Instr instr1 = instr_at(pc); |
| 2151 CHECK((GetOpcodeField(instr1) == LUI && GetOpcodeField(instr2) == ORI)); |
| 2152 #endif |
| 2153 |
| 2154 // Must use 2 instructions to ensure patchable code => just use lui and ori. |
| 2155 // lui rt, upper-16. |
| 2156 // ori rt rt, lower-16. |
2091 *p = LUI | rt_code | ((itarget & kHiMask) >> kLuiShift); | 2157 *p = LUI | rt_code | ((itarget & kHiMask) >> kLuiShift); |
2092 *(p+1) = ORI | rt_code | (rt_code << 5) | (itarget & kImm16Mask); | 2158 *(p+1) = ORI | rt_code | (rt_code << 5) | (itarget & kImm16Mask); |
2093 | 2159 |
2094 CPU::FlushICache(pc, 2 * sizeof(int32_t)); | 2160 // The following code is an optimization for the common case of Call() |
| 2161 // or Jump() which is load to register, and jump through register: |
| 2162 // li(t9, address); jalr(t9) (or jr(t9)). |
| 2163 // If the destination address is in the same 256 MB page as the call, it |
| 2164 // is faster to do a direct jal, or j, rather than jump thru register, since |
| 2165 // that lets the cpu pipeline prefetch the target address. However each |
| 2166 // time the address above is patched, we have to patch the direct jal/j |
| 2167 // instruction, as well as possibly revert to jalr/jr if we now cross a |
| 2168 // 256 MB page. Note that with the jal/j instructions, we do not need to |
| 2169 // load the register, but that code is left, since it makes it easy to |
| 2170 // revert this process. A further optimization could try replacing the |
| 2171 // li sequence with nops. |
| 2172 // This optimization can only be applied if the rt-code from instr2 is the |
| 2173 // register used for the jalr/jr. Finally, we have to skip 'jr ra', which is |
| 2174 // mips return. Occasionally this lands after an li(). |
| 2175 |
| 2176 Instr instr3 = instr_at(pc + 2 * kInstrSize); |
| 2177 uint32_t ipc = reinterpret_cast<uint32_t>(pc + 3 * kInstrSize); |
| 2178 bool in_range = |
| 2179 ((uint32_t)(ipc ^ itarget) >> (kImm26Bits + kImmFieldShift)) == 0; |
| 2180 uint32_t target_field = (uint32_t)(itarget & kJumpAddrMask) >> kImmFieldShift; |
| 2181 bool patched_jump = false; |
| 2182 |
| 2183 #ifndef ALLOW_JAL_IN_BOUNDARY_REGION |
| 2184 // This is a workaround to the 24k core E156 bug (affect some 34k cores also). |
| 2185 // Since the excluded space is only 64KB out of 256MB (0.02 %), we will just |
| 2186 // apply this workaround for all cores so we don't have to identify the core. |
| 2187 if (in_range) { |
| 2188 // The 24k core E156 bug has some very specific requirements, we only check |
| 2189 // the most simple one: if the address of the delay slot instruction is in |
| 2190 // the first or last 32 KB of the 256 MB segment. |
| 2191 uint32_t segment_mask = ((256 * MB) - 1) ^ ((32 * KB) - 1); |
| 2192 uint32_t ipc_segment_addr = ipc & segment_mask; |
| 2193 if (ipc_segment_addr == 0 || ipc_segment_addr == segment_mask) |
| 2194 in_range = false; |
| 2195 } |
| 2196 #endif |
| 2197 |
| 2198 if (IsJalr(instr3)) { |
| 2199 // Try to convert JALR to JAL. |
| 2200 if (in_range && GetRt(instr2) == GetRs(instr3)) { |
| 2201 *(p+2) = JAL | target_field; |
| 2202 patched_jump = true; |
| 2203 } |
| 2204 } else if (IsJr(instr3)) { |
| 2205 // Try to convert JR to J, skip returns (jr ra). |
| 2206 bool is_ret = static_cast<int>(GetRs(instr3)) == ra.code(); |
| 2207 if (in_range && !is_ret && GetRt(instr2) == GetRs(instr3)) { |
| 2208 *(p+2) = J | target_field; |
| 2209 patched_jump = true; |
| 2210 } |
| 2211 } else if (IsJal(instr3)) { |
| 2212 if (in_range) { |
| 2213 // We are patching an already converted JAL. |
| 2214 *(p+2) = JAL | target_field; |
| 2215 } else { |
| 2216 // Patch JAL, but out of range, revert to JALR. |
| 2217 // JALR rs reg is the rt reg specified in the ORI instruction. |
| 2218 uint32_t rs_field = GetRt(instr2) << kRsShift; |
| 2219 uint32_t rd_field = ra.code() << kRdShift; // Return-address (ra) reg. |
| 2220 *(p+2) = SPECIAL | rs_field | rd_field | JALR; |
| 2221 } |
| 2222 patched_jump = true; |
| 2223 } else if (IsJ(instr3)) { |
| 2224 if (in_range) { |
| 2225 // We are patching an already converted J (jump). |
| 2226 *(p+2) = J | target_field; |
| 2227 } else { |
| 2228 // Trying patch J, but out of range, just go back to JR. |
| 2229 // JR 'rs' reg is the 'rt' reg specified in the ORI instruction (instr2). |
| 2230 uint32_t rs_field = GetRt(instr2) << kRsShift; |
| 2231 *(p+2) = SPECIAL | rs_field | JR; |
| 2232 } |
| 2233 patched_jump = true; |
| 2234 } |
| 2235 |
| 2236 CPU::FlushICache(pc, (patched_jump ? 3 : 2) * sizeof(int32_t)); |
2095 } | 2237 } |
2096 | 2238 |
| 2239 void Assembler::JumpLabelToJumpRegister(Address pc) { |
| 2240 // Address pc points to lui/ori instructions. |
| 2241 // Jump to label may follow at pc + 2 * kInstrSize. |
| 2242 uint32_t* p = reinterpret_cast<uint32_t*>(pc); |
| 2243 #ifdef DEBUG |
| 2244 Instr instr1 = instr_at(pc); |
| 2245 #endif |
| 2246 Instr instr2 = instr_at(pc + 1 * kInstrSize); |
| 2247 Instr instr3 = instr_at(pc + 2 * kInstrSize); |
| 2248 bool patched = false; |
| 2249 |
| 2250 if (IsJal(instr3)) { |
| 2251 ASSERT(GetOpcodeField(instr1) == LUI); |
| 2252 ASSERT(GetOpcodeField(instr2) == ORI); |
| 2253 |
| 2254 uint32_t rs_field = GetRt(instr2) << kRsShift; |
| 2255 uint32_t rd_field = ra.code() << kRdShift; // Return-address (ra) reg. |
| 2256 *(p+2) = SPECIAL | rs_field | rd_field | JALR; |
| 2257 patched = true; |
| 2258 } else if (IsJ(instr3)) { |
| 2259 ASSERT(GetOpcodeField(instr1) == LUI); |
| 2260 ASSERT(GetOpcodeField(instr2) == ORI); |
| 2261 |
| 2262 uint32_t rs_field = GetRt(instr2) << kRsShift; |
| 2263 *(p+2) = SPECIAL | rs_field | JR; |
| 2264 patched = true; |
| 2265 } |
| 2266 |
| 2267 if (patched) { |
| 2268 CPU::FlushICache(pc + 2 * kInstrSize, sizeof(Address)); |
| 2269 } |
| 2270 } |
2097 | 2271 |
2098 } } // namespace v8::internal | 2272 } } // namespace v8::internal |
2099 | 2273 |
2100 #endif // V8_TARGET_ARCH_MIPS | 2274 #endif // V8_TARGET_ARCH_MIPS |
OLD | NEW |