| OLD | NEW |
| 1 // Copyright (c) 1994-2006 Sun Microsystems Inc. | 1 // Copyright (c) 1994-2006 Sun Microsystems Inc. |
| 2 // All Rights Reserved. | 2 // All Rights Reserved. |
| 3 // | 3 // |
| 4 // Redistribution and use in source and binary forms, with or without | 4 // Redistribution and use in source and binary forms, with or without |
| 5 // modification, are permitted provided that the following conditions are | 5 // modification, are permitted provided that the following conditions are |
| 6 // met: | 6 // met: |
| 7 // | 7 // |
| 8 // - Redistributions of source code must retain the above copyright notice, | 8 // - Redistributions of source code must retain the above copyright notice, |
| 9 // this list of conditions and the following disclaimer. | 9 // this list of conditions and the following disclaimer. |
| 10 // | 10 // |
| (...skipping 2979 matching lines...) | |
| 2990 Instr instr1 = instr_at(pc); | 2990 Instr instr1 = instr_at(pc); |
| 2991 CHECK((GetOpcodeField(instr1) == LUI && GetOpcodeField(instr2) == ORI)); | 2991 CHECK((GetOpcodeField(instr1) == LUI && GetOpcodeField(instr2) == ORI)); |
| 2992 #endif | 2992 #endif |
| 2993 | 2993 |
| 2994 // Must use 2 instructions to ensure patchable code => just use lui and ori. | 2994 // Must use 2 instructions to ensure patchable code => just use lui and ori. |
| 2995 // lui rt, upper-16. | 2995 // lui rt, upper-16. |
| 2996 // ori rt, rt, lower-16. | 2996 // ori rt, rt, lower-16. |
| 2997 *p = LUI | rt_code | ((itarget & kHiMask) >> kLuiShift); | 2997 *p = LUI | rt_code | ((itarget & kHiMask) >> kLuiShift); |
| 2998 *(p + 1) = ORI | rt_code | (rt_code << 5) | (itarget & kImm16Mask); | 2998 *(p + 1) = ORI | rt_code | (rt_code << 5) | (itarget & kImm16Mask); |
| 2999 | 2999 |
| 3000 // The following code is an optimization for the common case of Call() | |
| 3001 // or Jump(), which is a load to a register followed by a jump through that | |
| 3002 // register: li(t9, address); jalr(t9) (or jr(t9)). | |
| 3003 // If the destination address is in the same 256 MB region as the call, it | |
| 3004 // is faster to do a direct jal or j rather than a jump through a register, | |
| 3005 // since that lets the CPU pipeline prefetch the target address. However, | |
| 3006 // each time the address above is patched, we have to patch the direct jal/j | |
| 3007 // instruction as well, and possibly revert to jalr/jr if we now cross a | |
| 3008 // 256 MB boundary. Note that with the jal/j instructions we do not need to | |
| 3009 // load the register, but that code is left in place, since it makes it easy | |
| 3010 // to revert this process. A further optimization could try replacing the | |
| 3011 // li sequence with nops. | |
| 3012 // This optimization can only be applied if the rt-code from instr2 is the | |
| 3013 // register used for the jalr/jr. Finally, we have to skip 'jr ra', which is | |
| 3014 // the MIPS return. Occasionally this lands after an li(). | |
| 3015 | |
| 3016 Instr instr3 = instr_at(pc + 2 * kInstrSize); | |
| 3017 uint32_t ipc = reinterpret_cast<uint32_t>(pc + 3 * kInstrSize); | |
| 3018 bool in_range = ((ipc ^ itarget) >> (kImm26Bits + kImmFieldShift)) == 0; | |
| 3019 uint32_t target_field = | |
| 3020 static_cast<uint32_t>(itarget & kJumpAddrMask) >> kImmFieldShift; | |
| 3021 bool patched_jump = false; | |
| 3022 | |
| 3023 #ifndef ALLOW_JAL_IN_BOUNDARY_REGION | |
| 3024 // This is a workaround for the 24k core E156 bug (it affects some 34k cores | |
| 3025 // as well). Since the excluded space is only 64 KB out of 256 MB (~0.02%), | |
| 3026 // we just apply the workaround on all cores so we don't have to identify the core. | |
| 3027 if (in_range) { | |
| 3028 // The 24k core E156 bug has some very specific requirements; we only check | |
| 3029 // the simplest one: whether the address of the delay-slot instruction is in | |
| 3030 // the first or last 32 KB of the 256 MB segment. | |
| 3031 uint32_t segment_mask = ((256 * MB) - 1) ^ ((32 * KB) - 1); | |
| 3032 uint32_t ipc_segment_addr = ipc & segment_mask; | |
| 3033 if (ipc_segment_addr == 0 || ipc_segment_addr == segment_mask) | |
| 3034 in_range = false; | |
| 3035 } | |
| 3036 #endif | |
| 3037 | |
| 3038 if (IsJalr(instr3)) { | |
| 3039 // Try to convert JALR to JAL. | |
| 3040 if (in_range && GetRt(instr2) == GetRs(instr3)) { | |
| 3041 *(p + 2) = JAL | target_field; | |
| 3042 patched_jump = true; | |
| 3043 } | |
| 3044 } else if (IsJr(instr3)) { | |
| 3045 // Try to convert JR to J, skip returns (jr ra). | |
| 3046 bool is_ret = static_cast<int>(GetRs(instr3)) == ra.code(); | |
| 3047 if (in_range && !is_ret && GetRt(instr2) == GetRs(instr3)) { | |
| 3048 *(p + 2) = J | target_field; | |
| 3049 patched_jump = true; | |
| 3050 } | |
| 3051 } else if (IsJal(instr3)) { | |
| 3052 if (in_range) { | |
| 3053 // We are patching an already converted JAL. | |
| 3054 *(p + 2) = JAL | target_field; | |
| 3055 } else { | |
| 3056 // Patching a JAL, but the target is out of range, so revert to JALR. | |
| 3057 // JALR rs reg is the rt reg specified in the ORI instruction. | |
| 3058 uint32_t rs_field = GetRt(instr2) << kRsShift; | |
| 3059 uint32_t rd_field = ra.code() << kRdShift; // Return-address (ra) reg. | |
| 3060 *(p + 2) = SPECIAL | rs_field | rd_field | JALR; | |
| 3061 } | |
| 3062 patched_jump = true; | |
| 3063 } else if (IsJ(instr3)) { | |
| 3064 if (in_range) { | |
| 3065 // We are patching an already converted J (jump). | |
| 3066 *(p + 2) = J | target_field; | |
| 3067 } else { | |
| 3068 // Trying to patch a J, but the target is out of range, so go back to JR. | |
| 3069 // JR 'rs' reg is the 'rt' reg specified in the ORI instruction (instr2). | |
| 3070 uint32_t rs_field = GetRt(instr2) << kRsShift; | |
| 3071 if (IsMipsArchVariant(kMips32r6)) { | |
| 3072 *(p + 2) = SPECIAL | rs_field | (zero_reg.code() << kRdShift) | JALR; | |
| 3073 } else { | |
| 3074 *(p + 2) = SPECIAL | rs_field | JR; | |
| 3075 } | |
| 3076 } | |
| 3077 patched_jump = true; | |
| 3078 } | |
| 3079 | 3000 |
| 3080 if (icache_flush_mode != SKIP_ICACHE_FLUSH) { | 3001 if (icache_flush_mode != SKIP_ICACHE_FLUSH) { |
| 3081 CpuFeatures::FlushICache(pc, (patched_jump ? 3 : 2) * sizeof(int32_t)); | 3002 CpuFeatures::FlushICache(pc, 2 * sizeof(int32_t)); |
| 3082 } | 3003 } |
| 3083 } | 3004 } |
| 3084 | 3005 |
| 3085 | |
| 3086 void Assembler::JumpToJumpRegister(Address pc) { | |
| 3087 // Address pc points to lui/ori instructions. | |
| 3088 // A jump to a label may follow at pc + 2 * kInstrSize. | |
| 3089 uint32_t* p = reinterpret_cast<uint32_t*>(pc); | |
| 3090 #ifdef DEBUG | |
| 3091 Instr instr1 = instr_at(pc); | |
| 3092 #endif | |
| 3093 Instr instr2 = instr_at(pc + 1 * kInstrSize); | |
| 3094 Instr instr3 = instr_at(pc + 2 * kInstrSize); | |
| 3095 bool patched = false; | |
| 3096 | |
| 3097 if (IsJal(instr3)) { | |
| 3098 DCHECK(GetOpcodeField(instr1) == LUI); | |
| 3099 DCHECK(GetOpcodeField(instr2) == ORI); | |
| 3100 | |
| 3101 uint32_t rs_field = GetRt(instr2) << kRsShift; | |
| 3102 uint32_t rd_field = ra.code() << kRdShift; // Return-address (ra) reg. | |
| 3103 *(p + 2) = SPECIAL | rs_field | rd_field | JALR; | |
| 3104 patched = true; | |
| 3105 } else if (IsJ(instr3)) { | |
| 3106 DCHECK(GetOpcodeField(instr1) == LUI); | |
| 3107 DCHECK(GetOpcodeField(instr2) == ORI); | |
| 3108 | |
| 3109 uint32_t rs_field = GetRt(instr2) << kRsShift; | |
| 3110 if (IsMipsArchVariant(kMips32r6)) { | |
| 3111 *(p + 2) = SPECIAL | rs_field | (zero_reg.code() << kRdShift) | JALR; | |
| 3112 } else { | |
| 3113 *(p + 2) = SPECIAL | rs_field | JR; | |
| 3114 } | |
| 3115 patched = true; | |
| 3116 } | |
| 3117 | |
| 3118 if (patched) { | |
| 3119 CpuFeatures::FlushICache(pc + 2 * kInstrSize, sizeof(Address)); | |
| 3120 } | |
| 3121 } | |
| 3122 | |
| 3123 | |
| 3124 } // namespace internal | 3006 } // namespace internal |
| 3125 } // namespace v8 | 3007 } // namespace v8 |
| 3126 | 3008 |
| 3127 #endif // V8_TARGET_ARCH_MIPS | 3009 #endif // V8_TARGET_ARCH_MIPS |
| OLD | NEW |
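
For readers skimming this diff: the two stores kept at new lines 2997-2998 implement the patchable two-instruction load, `lui rt, upper-16; ori rt, rt, lower-16`. Below is a minimal standalone sketch of that encoding, assuming the standard MIPS32 opcode values and field positions; the names (`EncodeLuiOri`, `kLuiOpcode`, ...) are illustrative and not part of the V8 assembler API.

```cpp
#include <cstdint>
#include <cstdio>

// Standard MIPS32 I-type field positions and the LUI/ORI opcodes.
// (Illustrative constants; V8 defines its own equivalents.)
constexpr uint32_t kLuiOpcode = 0x0Fu << 26;  // lui
constexpr uint32_t kOriOpcode = 0x0Du << 26;  // ori
constexpr int kRsShift = 21;
constexpr int kRtShift = 16;

// Build the lui/ori pair that loads the 32-bit 'target' into register 'rt':
//   lui rt, upper-16
//   ori rt, rt, lower-16
void EncodeLuiOri(uint32_t target, uint32_t rt, uint32_t out[2]) {
  out[0] = kLuiOpcode | (rt << kRtShift) | (target >> 16);
  out[1] = kOriOpcode | (rt << kRsShift) | (rt << kRtShift) | (target & 0xFFFFu);
}

int main() {
  uint32_t code[2];
  EncodeLuiOri(0x12345678u, 25u /* t9 */, code);
  // Prints: lui: 0x3c191234  ori: 0x37395678
  std::printf("lui: 0x%08x  ori: 0x%08x\n", code[0], code[1]);
}
```

In the V8 code kept above, `rt_code` already sits in the rt field extracted from the original lui, which is why the ori line ORs in `rt_code` and `rt_code << 5` (the rt-field position plus five bits is the rs-field position) rather than shifting a raw register number.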
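The block removed on the OLD side (old lines 3000-3078) converted the li/jalr (or li/jr) sequence into a direct jal/j when the call site and the target shared a 256 MB region, with the E156 errata check rejecting delay-slot addresses that fall in the first or last 32 KB of that region. Here is a self-contained sketch of those two checks and of the J-type target field, under the same constants used in the removed code (`kImm26Bits = 26`, `kImmFieldShift = 2`); the function names are hypothetical, not V8's.

```cpp
#include <cstdint>
#include <cstdio>

constexpr int kImm26Bits = 26;
constexpr int kImmFieldShift = 2;
constexpr uint32_t kMB = 1024u * 1024u;
constexpr uint32_t kKB = 1024u;
constexpr uint32_t kJalOpcode = 0x03u << 26;  // jal

// Can a direct J/JAL reach 'target' from the delay-slot address 'ipc'?
bool CanUseDirectJump(uint32_t ipc, uint32_t target) {
  // J/JAL keep the upper 4 PC bits, so both addresses must share them.
  bool in_range = ((ipc ^ target) >> (kImm26Bits + kImmFieldShift)) == 0;

  // E156 workaround: reject delay-slot addresses in the first or last
  // 32 KB of the 256 MB segment (the mask selects address bits 15..27).
  uint32_t segment_mask = ((256 * kMB) - 1) ^ ((32 * kKB) - 1);
  uint32_t ipc_segment_addr = ipc & segment_mask;
  if (ipc_segment_addr == 0 || ipc_segment_addr == segment_mask) in_range = false;
  return in_range;
}

// Encode 'jal target': the instruction carries target bits 27..2.
uint32_t MakeJal(uint32_t target) {
  return kJalOpcode | ((target & 0x0FFFFFFFu) >> kImmFieldShift);
}

int main() {
  std::printf("%d\n", CanUseDirectJump(0x10101000u, 0x10FFFF00u));  // 1: same region
  std::printf("%d\n", CanUseDirectJump(0x10000004u, 0x10FFFF00u));  // 0: E156 window
  std::printf("%d\n", CanUseDirectJump(0x10101000u, 0x20000000u));  // 0: crosses 256 MB
  std::printf("0x%08x\n", MakeJal(0x10FFFF00u));                    // 0x0c3fffc0
}
```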
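The reversion paths (old lines 3056-3075 and the removed JumpToJumpRegister) rebuild the register jump as an R-type SPECIAL instruction. A small sketch of that encoding follows, assuming the standard MIPS32 function codes (JALR = 0x09, JR = 0x08) and field shifts; the names are again illustrative rather than V8's.

```cpp
#include <cstdint>
#include <cstdio>

// Standard MIPS32 R-type fields and SPECIAL-class function codes.
// (Illustrative constants; V8 defines its own equivalents.)
constexpr uint32_t kSpecial = 0x00u << 26;  // SPECIAL opcode (0)
constexpr uint32_t kJalrFunc = 0x09u;
constexpr uint32_t kJrFunc = 0x08u;
constexpr int kRsShift = 21;
constexpr int kRdShift = 11;
constexpr uint32_t kRa = 31;  // return-address register

// jalr ra, rs  -- jump through 'rs', link into ra (rd field = 31).
uint32_t MakeJalr(uint32_t rs) {
  return kSpecial | (rs << kRsShift) | (kRa << kRdShift) | kJalrFunc;
}

// jr rs  -- plain register jump (on r6 the code above emits jalr zero, rs instead).
uint32_t MakeJr(uint32_t rs) {
  return kSpecial | (rs << kRsShift) | kJrFunc;
}

int main() {
  // Prints: jalr t9: 0x0320f809  jr t9: 0x03200008
  std::printf("jalr t9: 0x%08x  jr t9: 0x%08x\n", MakeJalr(25u), MakeJr(25u));
}
```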