| OLD | NEW |
| 1 // Copyright (c) 1994-2006 Sun Microsystems Inc. | 1 // Copyright (c) 1994-2006 Sun Microsystems Inc. |
| 2 // All Rights Reserved. | 2 // All Rights Reserved. |
| 3 // | 3 // |
| 4 // Redistribution and use in source and binary forms, with or without | 4 // Redistribution and use in source and binary forms, with or without |
| 5 // modification, are permitted provided that the following conditions | 5 // modification, are permitted provided that the following conditions |
| 6 // are met: | 6 // are met: |
| 7 // | 7 // |
| 8 // - Redistributions of source code must retain the above copyright notice, | 8 // - Redistributions of source code must retain the above copyright notice, |
| 9 // this list of conditions and the following disclaimer. | 9 // this list of conditions and the following disclaimer. |
| 10 // | 10 // |
| (...skipping 417 matching lines...) |
| 428 B24 | B21 | 15 * B16 | 15 * B12 | 15 * B8 | BLX; | 428 B24 | B21 | 15 * B16 | 15 * B12 | 15 * B8 | BLX; |
| 429 const Instr kBlxIp = al | kBlxRegPattern | ip.code(); | 429 const Instr kBlxIp = al | kBlxRegPattern | ip.code(); |
| 430 const Instr kMovMvnMask = 0x6d * B21 | 0xf * B16; | 430 const Instr kMovMvnMask = 0x6d * B21 | 0xf * B16; |
| 431 const Instr kMovMvnPattern = 0xd * B21; | 431 const Instr kMovMvnPattern = 0xd * B21; |
| 432 const Instr kMovMvnFlip = B22; | 432 const Instr kMovMvnFlip = B22; |
| 433 const Instr kMovLeaveCCMask = 0xdff * B16; | 433 const Instr kMovLeaveCCMask = 0xdff * B16; |
| 434 const Instr kMovLeaveCCPattern = 0x1a0 * B16; | 434 const Instr kMovLeaveCCPattern = 0x1a0 * B16; |
| 435 const Instr kMovwPattern = 0x30 * B20; | 435 const Instr kMovwPattern = 0x30 * B20; |
| 436 const Instr kMovtPattern = 0x34 * B20; | 436 const Instr kMovtPattern = 0x34 * B20; |
| 437 const Instr kMovwLeaveCCFlip = 0x5 * B21; | 437 const Instr kMovwLeaveCCFlip = 0x5 * B21; |
| 438 const Instr kMovImmedMask = 0x7f * B21; |
| 439 const Instr kMovImmedPattern = 0x1d * B21; |
| 440 const Instr kOrrImmedMask = 0x7f * B21; |
| 441 const Instr kOrrImmedPattern = 0x1c * B21; |
| 438 const Instr kCmpCmnMask = 0xdd * B20 | 0xf * B12; | 442 const Instr kCmpCmnMask = 0xdd * B20 | 0xf * B12; |
| 439 const Instr kCmpCmnPattern = 0x15 * B20; | 443 const Instr kCmpCmnPattern = 0x15 * B20; |
| 440 const Instr kCmpCmnFlip = B21; | 444 const Instr kCmpCmnFlip = B21; |
| 441 const Instr kAddSubFlip = 0x6 * B21; | 445 const Instr kAddSubFlip = 0x6 * B21; |
| 442 const Instr kAndBicFlip = 0xe * B21; | 446 const Instr kAndBicFlip = 0xe * B21; |
| 443 | 447 |
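The four new constants classify the `mov rd, #imm` / `orr rd, rd, #imm` immediate forms by their top data-processing bits (bits 27..21: `00`, I=1, then the opcode — 1101 for mov, 1100 for orr — with the S bit deliberately left outside the mask). A minimal standalone sketch, with the two test words hand-assembled rather than taken from the V8 headers:

```cpp
#include <cassert>
#include <cstdint>

// Standalone sketch (not V8 code): B21 is 1 << 21, as in the assembler header.
int main() {
  const uint32_t B21 = 1u << 21;
  const uint32_t kMovImmedMask = 0x7f * B21;     // bits 27..21: 00, I, opcode
  const uint32_t kMovImmedPattern = 0x1d * B21;  // I=1, opcode 1101 (MOV)
  const uint32_t kOrrImmedMask = 0x7f * B21;
  const uint32_t kOrrImmedPattern = 0x1c * B21;  // I=1, opcode 1100 (ORR)

  const uint32_t mov_r0_1 = 0xE3A00001;      // mov r0, #1
  const uint32_t orr_r0_0x100 = 0xE3800C01;  // orr r0, r0, #0x100

  assert((mov_r0_1 & kMovImmedMask) == kMovImmedPattern);
  assert((orr_r0_0x100 & kOrrImmedMask) == kOrrImmedPattern);
  return 0;
}
```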
| 444 // A mask for the Rd register for push, pop, ldr, str instructions. | 448 // A mask for the Rd register for push, pop, ldr, str instructions. |
| 445 const Instr kLdrRegFpOffsetPattern = | 449 const Instr kLdrRegFpOffsetPattern = |
| 446 al | B26 | L | Offset | kRegister_fp_Code * B16; | 450 al | B26 | L | Offset | kRegister_fp_Code * B16; |
| 447 const Instr kStrRegFpOffsetPattern = | 451 const Instr kStrRegFpOffsetPattern = |
| (...skipping 597 matching lines...) |
| 1045 } else if (RelocInfo::IsNone(rmode_)) { | 1049 } else if (RelocInfo::IsNone(rmode_)) { |
| 1046 return false; | 1050 return false; |
| 1047 } | 1051 } |
| 1048 return true; | 1052 return true; |
| 1049 } | 1053 } |
| 1050 | 1054 |
| 1051 | 1055 |
| 1052 static bool use_mov_immediate_load(const Operand& x, | 1056 static bool use_mov_immediate_load(const Operand& x, |
| 1053 const Assembler* assembler) { | 1057 const Assembler* assembler) { |
| 1054 if (assembler != NULL && !assembler->is_constant_pool_available()) { | 1058 if (assembler != NULL && !assembler->is_constant_pool_available()) { |
| 1055 // If there is no constant pool available, we must use an mov immediate. | |
| 1056 // TODO(rmcilroy): enable ARMv6 support. | |
| 1057 DCHECK(CpuFeatures::IsSupported(ARMv7)); | |
| 1058 return true; | 1059 return true; |
| 1059 } else if (CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS) && | 1060 } else if (CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS) && |
| 1060 (assembler == NULL || !assembler->predictable_code_size())) { | 1061 (assembler == NULL || !assembler->predictable_code_size())) { |
| 1061 // Prefer movw / movt to constant pool if it is more efficient on the CPU. | 1062 // Prefer movw / movt to constant pool if it is more efficient on the CPU. |
| 1062 return true; | 1063 return true; |
| 1063 } else if (x.must_output_reloc_info(assembler)) { | 1064 } else if (x.must_output_reloc_info(assembler)) { |
| 1064 // Prefer constant pool if data is likely to be patched. | 1065 // Prefer constant pool if data is likely to be patched. |
| 1065 return false; | 1066 return false; |
| 1066 } else { | 1067 } else { |
| 1067 // Otherwise, use immediate load if movw / movt is available. | 1068 // Otherwise, use immediate load if movw / movt is available. |
| 1068 return CpuFeatures::IsSupported(ARMv7); | 1069 return CpuFeatures::IsSupported(ARMv7); |
| 1069 } | 1070 } |
| 1070 } | 1071 } |
| 1071 | 1072 |
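The rewritten function now simply returns true on the no-pool path, instead of DCHECK-ing ARMv7 first — the new mov/orr fallback makes an immediate load legal on any architecture. A standalone sketch of the priority order, with plain booleans standing in for the real assembler and CpuFeatures queries:

```cpp
#include <cassert>

// Hedged sketch (assumed flag names) of the decision ladder above.
static bool UseMovImmediateLoad(bool pool_available, bool movw_movt_fast,
                                bool predictable_code_size,
                                bool needs_reloc_patching, bool has_armv7) {
  if (!pool_available) return true;   // must synthesize the value in registers
  if (movw_movt_fast && !predictable_code_size) return true;  // CPU prefers it
  if (needs_reloc_patching) return false;  // patched data: prefer the pool
  return has_armv7;                        // default: movw/movt when available
}

int main() {
  // With no constant pool, an immediate load is forced even without ARMv7 --
  // exactly the case the deleted DCHECK(ARMv7) used to rule out.
  assert(UseMovImmediateLoad(false, false, false, false, false));
  // Data that will be patched prefers the pool when one is available.
  assert(!UseMovImmediateLoad(true, false, false, true, false));
  return 0;
}
```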
| 1072 | 1073 |
| 1073 int Operand::instructions_required(const Assembler* assembler, | 1074 int Operand::instructions_required(const Assembler* assembler, |
| 1074 Instr instr) const { | 1075 Instr instr) const { |
| 1075 if (rm_.is_valid()) return 1; | 1076 if (rm_.is_valid()) return 1; |
| 1076 uint32_t dummy1, dummy2; | 1077 uint32_t dummy1, dummy2; |
| 1077 if (must_output_reloc_info(assembler) || | 1078 if (must_output_reloc_info(assembler) || |
| 1078 !fits_shifter(imm32_, &dummy1, &dummy2, &instr)) { | 1079 !fits_shifter(imm32_, &dummy1, &dummy2, &instr)) { |
| 1079 // The immediate operand cannot be encoded as a shifter operand, or use of | 1080 // The immediate operand cannot be encoded as a shifter operand, or use of |
| 1080 // constant pool is required. First account for the instructions required | 1081 // constant pool is required. First account for the instructions required |
| 1081 // for the constant pool or immediate load | 1082 // for the constant pool or immediate load |
| 1082 int instructions; | 1083 int instructions; |
| 1083 if (use_mov_immediate_load(*this, assembler)) { | 1084 if (use_mov_immediate_load(*this, assembler)) { |
| 1084 instructions = 2; // A movw, movt immediate load. | 1085 // A movw / movt or mov / orr immediate load. |
| 1086 instructions = CpuFeatures::IsSupported(ARMv7) ? 2 : 4; |
| 1085 } else if (assembler != NULL && assembler->use_extended_constant_pool()) { | 1087 } else if (assembler != NULL && assembler->use_extended_constant_pool()) { |
| 1086 instructions = 3; // An extended constant pool load. | 1088 // An extended constant pool load. |
| 1089 instructions = CpuFeatures::IsSupported(ARMv7) ? 3 : 5; |
| 1087 } else { | 1090 } else { |
| 1088 instructions = 1; // A small constant pool load. | 1091 // A small constant pool load. |
| 1092 instructions = 1; |
| 1089 } | 1093 } |
| 1090 | 1094 |
| 1091 if ((instr & ~kCondMask) != 13 * B21) { // mov, S not set | 1095 if ((instr & ~kCondMask) != 13 * B21) { // mov, S not set |
| 1092 // For a mov or mvn instruction which doesn't set the condition | 1096 // For a mov or mvn instruction which doesn't set the condition |
| 1093 // code, the constant pool or immediate load is enough, otherwise we need | 1097 // code, the constant pool or immediate load is enough, otherwise we need |
| 1094 // to account for the actual instruction being requested. | 1098 // to account for the actual instruction being requested. |
| 1095 instructions += 1; | 1099 instructions += 1; |
| 1096 } | 1100 } |
| 1097 return instructions; | 1101 return instructions; |
| 1098 } else { | 1102 } else { |
| 1099 // No use of constant pool and the immediate operand can be encoded as a | 1103 // No use of constant pool and the immediate operand can be encoded as a |
| 1100 // shifter operand. | 1104 // shifter operand. |
| 1101 return 1; | 1105 return 1; |
| 1102 } | 1106 } |
| 1103 } | 1107 } |
| 1104 | 1108 |
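The counts follow from the sequences emitted later in the patch: movw/movt is 2 instructions, the ARMv6 mov + 3 × orr fallback is 4, and an extended-section load appends an ldr to either (3 or 5). A small sketch of the arithmetic, with hypothetical parameter names rather than the real Operand/Assembler API:

```cpp
#include <cassert>

// Hedged sketch reproducing the instruction counts above.
static int InstructionsRequired(bool immediate_load, bool extended_pool,
                                bool armv7, bool plain_mov) {
  int n;
  if (immediate_load) {
    n = armv7 ? 2 : 4;   // movw/movt vs mov + orr + orr + orr
  } else if (extended_pool) {
    n = armv7 ? 3 : 5;   // immediate load of the pool offset, plus the ldr
  } else {
    n = 1;               // single ldr from the small section
  }
  if (!plain_mov) n += 1;  // the requesting instruction itself, e.g. an add
  return n;
}

int main() {
  assert(InstructionsRequired(true, false, false, true) == 4);  // ARMv6 mov/orr
  assert(InstructionsRequired(false, true, false, true) == 5);  // ARMv6 ext pool
  return 0;
}
```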
| 1105 | 1109 |
| 1106 void Assembler::move_32_bit_immediate(Register rd, | 1110 void Assembler::move_32_bit_immediate(Register rd, |
| 1107 const Operand& x, | 1111 const Operand& x, |
| 1108 Condition cond) { | 1112 Condition cond) { |
| 1109 RelocInfo rinfo(pc_, x.rmode_, x.imm32_, NULL); | 1113 RelocInfo rinfo(pc_, x.rmode_, x.imm32_, NULL); |
| 1114 uint32_t imm32 = static_cast<uint32_t>(x.imm32_); |
| 1110 if (x.must_output_reloc_info(this)) { | 1115 if (x.must_output_reloc_info(this)) { |
| 1111 RecordRelocInfo(rinfo); | 1116 RecordRelocInfo(rinfo); |
| 1112 } | 1117 } |
| 1113 | 1118 |
| 1114 if (use_mov_immediate_load(x, this)) { | 1119 if (use_mov_immediate_load(x, this)) { |
| 1115 Register target = rd.code() == pc.code() ? ip : rd; | 1120 Register target = rd.code() == pc.code() ? ip : rd; |
| 1116 // TODO(rmcilroy): add ARMv6 support for immediate loads. | 1121 if (CpuFeatures::IsSupported(ARMv7)) { |
| 1117 DCHECK(CpuFeatures::IsSupported(ARMv7)); | 1122 if (!FLAG_enable_ool_constant_pool && x.must_output_reloc_info(this)) { |
| 1118 if (!FLAG_enable_ool_constant_pool && | 1123 // Make sure the movw/movt doesn't get separated. |
| 1119 x.must_output_reloc_info(this)) { | 1124 BlockConstPoolFor(2); |
| 1120 // Make sure the movw/movt doesn't get separated. | 1125 } |
| 1121 BlockConstPoolFor(2); | 1126 movw(target, imm32 & 0xffff, cond); |
| 1127 movt(target, imm32 >> 16, cond); |
| 1128 } else { |
| 1129 DCHECK(FLAG_enable_ool_constant_pool); |
| 1130 mov(target, Operand(imm32 & kImm8Mask), LeaveCC, cond); |
| 1131 orr(target, target, Operand(imm32 & (kImm8Mask << 8)), LeaveCC, cond); |
| 1132 orr(target, target, Operand(imm32 & (kImm8Mask << 16)), LeaveCC, cond); |
| 1133 orr(target, target, Operand(imm32 & (kImm8Mask << 24)), LeaveCC, cond); |
| 1122 } | 1134 } |
| 1123 movw(target, static_cast<uint32_t>(x.imm32_ & 0xffff), cond); | |
| 1124 movt(target, static_cast<uint32_t>(x.imm32_) >> 16, cond); | |
| 1125 if (target.code() != rd.code()) { | 1135 if (target.code() != rd.code()) { |
| 1126 mov(rd, target, LeaveCC, cond); | 1136 mov(rd, target, LeaveCC, cond); |
| 1127 } | 1137 } |
| 1128 } else { | 1138 } else { |
| 1129 DCHECK(is_constant_pool_available()); | 1139 DCHECK(is_constant_pool_available()); |
| 1130 ConstantPoolArray::LayoutSection section = ConstantPoolAddEntry(rinfo); | 1140 ConstantPoolArray::LayoutSection section = ConstantPoolAddEntry(rinfo); |
| 1131 if (section == ConstantPoolArray::EXTENDED_SECTION) { | 1141 if (section == ConstantPoolArray::EXTENDED_SECTION) { |
| 1132 DCHECK(FLAG_enable_ool_constant_pool); | 1142 DCHECK(FLAG_enable_ool_constant_pool); |
| 1133 Register target = rd.code() == pc.code() ? ip : rd; | 1143 Register target = rd.code() == pc.code() ? ip : rd; |
| 1134 // Emit instructions to load constant pool offset. | 1144 // Emit instructions to load constant pool offset. |
| 1135 movw(target, 0, cond); | 1145 if (CpuFeatures::IsSupported(ARMv7)) { |
| 1136 movt(target, 0, cond); | 1146 movw(target, 0, cond); |
| 1147 movt(target, 0, cond); |
| 1148 } else { |
| 1149 mov(target, Operand(0), LeaveCC, cond); |
| 1150 orr(target, target, Operand(0), LeaveCC, cond); |
| 1151 orr(target, target, Operand(0), LeaveCC, cond); |
| 1152 orr(target, target, Operand(0), LeaveCC, cond); |
| 1153 } |
| 1137 // Load from constant pool at offset. | 1154 // Load from constant pool at offset. |
| 1138 ldr(rd, MemOperand(pp, target), cond); | 1155 ldr(rd, MemOperand(pp, target), cond); |
| 1139 } else { | 1156 } else { |
| 1140 DCHECK(section == ConstantPoolArray::SMALL_SECTION); | 1157 DCHECK(section == ConstantPoolArray::SMALL_SECTION); |
| 1141 ldr(rd, MemOperand(FLAG_enable_ool_constant_pool ? pp : pc, 0), cond); | 1158 ldr(rd, MemOperand(FLAG_enable_ool_constant_pool ? pp : pc, 0), cond); |
| 1142 } | 1159 } |
| 1143 } | 1160 } |
| 1144 } | 1161 } |
| 1145 | 1162 |
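The ARMv6 branch splits the 32-bit immediate into four byte-aligned chunks; each chunk is an 8-bit value at an even rotation, so it always fits the rotated-immediate shifter encoding and none of the four emits needs the constant pool. A sketch (assuming kImm8Mask is 0xff, consistent with its use as a byte selector here) verifying the decomposition:

```cpp
#include <cassert>
#include <cstdint>

// Minimal sketch of the mov/orr decomposition used above.
int main() {
  const uint32_t kImm8Mask = 0xff;  // assumed value of the V8 constant
  uint32_t imm32 = 0xDEADBEEF;

  uint32_t chunks[4] = {
      imm32 & kImm8Mask,          // emitted by the mov
      imm32 & (kImm8Mask << 8),   // first orr
      imm32 & (kImm8Mask << 16),  // second orr
      imm32 & (kImm8Mask << 24),  // third orr
  };

  // Each chunk is a byte at an even rotation, so fits_shifter always
  // succeeds on it; OR-ing the chunks rebuilds the original immediate.
  uint32_t target = 0;
  for (uint32_t c : chunks) target |= c;
  assert(target == imm32);
  return 0;
}
```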
| 1146 | 1163 |
| (...skipping 1993 matching lines...) |
| 3140 return ((immediate & 0xf000) << 4) | (immediate & 0xfff); | 3157 return ((immediate & 0xf000) << 4) | (immediate & 0xfff); |
| 3141 } | 3158 } |
| 3142 | 3159 |
| 3143 | 3160 |
| 3144 Instr Assembler::PatchMovwImmediate(Instr instruction, uint32_t immediate) { | 3161 Instr Assembler::PatchMovwImmediate(Instr instruction, uint32_t immediate) { |
| 3145 instruction &= ~EncodeMovwImmediate(0xffff); | 3162 instruction &= ~EncodeMovwImmediate(0xffff); |
| 3146 return instruction | EncodeMovwImmediate(immediate); | 3163 return instruction | EncodeMovwImmediate(immediate); |
| 3147 } | 3164 } |
| 3148 | 3165 |
| 3149 | 3166 |
| 3167 int Assembler::DecodeShiftImm(Instr instr) { |
| 3168 int rotate = Instruction::RotateValue(instr) * 2; |
| 3169 int immed8 = Instruction::Immed8Value(instr); |
| 3170 return rotate == 0 ? immed8 : static_cast<int>((immed8 >> rotate) | (static_cast<uint32_t>(immed8) << (32 - rotate))); |
| 3171 } |
| 3172 |
| 3173 |
| 3174 Instr Assembler::PatchShiftImm(Instr instr, int immed) { |
| 3175 uint32_t rotate_imm = 0; |
| 3176 uint32_t immed_8 = 0; |
| 3177 bool immed_fits = fits_shifter(immed, &rotate_imm, &immed_8, NULL); |
| 3178 DCHECK(immed_fits); |
| 3179 return (instr & ~kOff12Mask) | (rotate_imm << 8) | immed_8; |
| 3180 } |
| 3181 |
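The new DecodeShiftImm/PatchShiftImm pair round-trips the addressing-mode-1 immediate: an 8-bit value rotated right by twice the 4-bit rotate field, re-packed into bits 11..8 and 7..0. A sketch of the same arithmetic, with the rotation written so a zero rotate avoids the undefined 32-bit shift a literal `x << (32 - r)` would hit:

```cpp
#include <cassert>
#include <cstdint>

// Standalone sketch of the mode-1 immediate: imm8 rotated right by 2 * rotate.
static uint32_t rotr(uint32_t x, unsigned r) {
  r &= 31;  // keeps the shift well-defined when r == 0
  return (x >> r) | (x << ((32 - r) & 31));
}

int main() {
  // 0x3F0000 is 0x3F rotated right by 16, so rotate_imm == 8, immed_8 == 0x3F.
  uint32_t rotate_imm = 8, immed_8 = 0x3F;
  assert(rotr(immed_8, rotate_imm * 2) == 0x3F0000u);

  // Re-encoding packs the fields back into bits 11..8 and 7..0, which is
  // what PatchShiftImm writes after fits_shifter recomputes them.
  uint32_t shifter = (rotate_imm << 8) | immed_8;
  assert(shifter == 0x83Fu);
  return 0;
}
```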
| 3182 |
| 3150 bool Assembler::IsNop(Instr instr, int type) { | 3183 bool Assembler::IsNop(Instr instr, int type) { |
| 3151 DCHECK(0 <= type && type <= 14); // mov pc, pc isn't a nop. | 3184 DCHECK(0 <= type && type <= 14); // mov pc, pc isn't a nop. |
| 3152 // Check for mov rx, rx where x = type. | 3185 // Check for mov rx, rx where x = type. |
| 3153 return instr == (al | 13*B21 | type*B12 | type); | 3186 return instr == (al | 13*B21 | type*B12 | type); |
| 3154 } | 3187 } |
| 3155 | 3188 |
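For context, the type-encoded nops checked here are just `mov rn, rn`. A quick standalone check against two hand-assembled words:

```cpp
#include <cassert>
#include <cstdint>

// Sketch of the check above: nop of type n is encoded as 'mov rn, rn'.
int main() {
  const uint32_t al = 0xEu << 28, B21 = 1u << 21, B12 = 1u << 12;
  auto is_nop = [&](uint32_t instr, uint32_t type) {
    return instr == (al | 13 * B21 | type * B12 | type);
  };
  assert(is_nop(0xE1A00000, 0));   // mov r0, r0
  assert(is_nop(0xE1A0C00C, 12));  // mov ip, ip
  return 0;
}
```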
| 3156 | 3189 |
| 3190 bool Assembler::IsMovImmed(Instr instr) { |
| 3191 return (instr & kMovImmedMask) == kMovImmedPattern; |
| 3192 } |
| 3193 |
| 3194 |
| 3195 bool Assembler::IsOrrImmed(Instr instr) { |
| 3196 return (instr & kOrrImmedMask) == kOrrImmedPattern; |
| 3197 } |
| 3198 |
| 3199 |
| 3157 // static | 3200 // static |
| 3158 bool Assembler::ImmediateFitsAddrMode1Instruction(int32_t imm32) { | 3201 bool Assembler::ImmediateFitsAddrMode1Instruction(int32_t imm32) { |
| 3159 uint32_t dummy1; | 3202 uint32_t dummy1; |
| 3160 uint32_t dummy2; | 3203 uint32_t dummy2; |
| 3161 return fits_shifter(imm32, &dummy1, &dummy2, NULL); | 3204 return fits_shifter(imm32, &dummy1, &dummy2, NULL); |
| 3162 } | 3205 } |
| 3163 | 3206 |
| 3164 | 3207 |
| 3165 bool Assembler::ImmediateFitsAddrMode2Instruction(int32_t imm32) { | 3208 bool Assembler::ImmediateFitsAddrMode2Instruction(int32_t imm32) { |
| 3166 return is_uint12(abs(imm32)); | 3209 return is_uint12(abs(imm32)); |
| (...skipping 561 matching lines...) |
| 3728 offset -= kHeapObjectTag; | 3771 offset -= kHeapObjectTag; |
| 3729 entry->merged_index_ = offset; // Stash offset for merged entries. | 3772 entry->merged_index_ = offset; // Stash offset for merged entries. |
| 3730 } else { | 3773 } else { |
| 3731 DCHECK(entry->merged_index_ < (entry - entries_.begin())); | 3774 DCHECK(entry->merged_index_ < (entry - entries_.begin())); |
| 3732 offset = entries_[entry->merged_index_].merged_index_; | 3775 offset = entries_[entry->merged_index_].merged_index_; |
| 3733 } | 3776 } |
| 3734 | 3777 |
| 3735 // Patch vldr/ldr instruction with correct offset. | 3778 // Patch vldr/ldr instruction with correct offset. |
| 3736 Instr instr = assm->instr_at(rinfo.pc()); | 3779 Instr instr = assm->instr_at(rinfo.pc()); |
| 3737 if (entry->section_ == ConstantPoolArray::EXTENDED_SECTION) { | 3780 if (entry->section_ == ConstantPoolArray::EXTENDED_SECTION) { |
| 3738 // Instructions to patch must be 'movw rd, [#0]' and 'movt rd, [#0]. | 3781 if (CpuFeatures::IsSupported(ARMv7)) { |
| 3739 Instr next_instr = assm->instr_at(rinfo.pc() + Assembler::kInstrSize); | 3782 // Instructions to patch must be 'movw rd, [#0]' and 'movt rd, [#0]'. |
| 3740 DCHECK((Assembler::IsMovW(instr) && | 3783 Instr next_instr = assm->instr_at(rinfo.pc() + Assembler::kInstrSize); |
| 3741 Instruction::ImmedMovwMovtValue(instr) == 0)); | 3784 DCHECK((Assembler::IsMovW(instr) && |
| 3742 DCHECK((Assembler::IsMovT(next_instr) && | 3785 Instruction::ImmedMovwMovtValue(instr) == 0)); |
| 3743 Instruction::ImmedMovwMovtValue(next_instr) == 0)); | 3786 DCHECK((Assembler::IsMovT(next_instr) && |
| 3744 assm->instr_at_put(rinfo.pc(), | 3787 Instruction::ImmedMovwMovtValue(next_instr) == 0)); |
| 3745 Assembler::PatchMovwImmediate(instr, offset & 0xffff)); | 3788 assm->instr_at_put( |
| 3746 assm->instr_at_put( | 3789 rinfo.pc(), Assembler::PatchMovwImmediate(instr, offset & 0xffff)); |
| 3747 rinfo.pc() + Assembler::kInstrSize, | 3790 assm->instr_at_put( |
| 3748 Assembler::PatchMovwImmediate(next_instr, offset >> 16)); | 3791 rinfo.pc() + Assembler::kInstrSize, |
| 3792 Assembler::PatchMovwImmediate(next_instr, offset >> 16)); |
| 3793 } else { |
| 3794 // Instructions to patch must be 'mov rd, [#0]' and 'orr rd, rd, [#0]'. |
| 3795 Instr instr_2 = assm->instr_at(rinfo.pc() + Assembler::kInstrSize); |
| 3796 Instr instr_3 = assm->instr_at(rinfo.pc() + 2 * Assembler::kInstrSize); |
| 3797 Instr instr_4 = assm->instr_at(rinfo.pc() + 3 * Assembler::kInstrSize); |
| 3798 DCHECK((Assembler::IsMovImmed(instr) && |
| 3799 Instruction::Immed8Value(instr) == 0)); |
| 3800 DCHECK((Assembler::IsOrrImmed(instr_2) && |
| 3801 Instruction::Immed8Value(instr_2) == 0) && |
| 3802 Assembler::GetRn(instr_2).is(Assembler::GetRd(instr_2))); |
| 3803 DCHECK((Assembler::IsOrrImmed(instr_3) && |
| 3804 Instruction::Immed8Value(instr_3) == 0) && |
| 3805 Assembler::GetRn(instr_3).is(Assembler::GetRd(instr_3))); |
| 3806 DCHECK((Assembler::IsOrrImmed(instr_4) && |
| 3807 Instruction::Immed8Value(instr_4) == 0) && |
| 3808 Assembler::GetRn(instr_4).is(Assembler::GetRd(instr_4))); |
| 3809 assm->instr_at_put( |
| 3810 rinfo.pc(), Assembler::PatchShiftImm(instr, (offset & kImm8Mask))); |
| 3811 assm->instr_at_put( |
| 3812 rinfo.pc() + Assembler::kInstrSize, |
| 3813 Assembler::PatchShiftImm(instr_2, (offset & (kImm8Mask << 8)))); |
| 3814 assm->instr_at_put( |
| 3815 rinfo.pc() + 2 * Assembler::kInstrSize, |
| 3816 Assembler::PatchShiftImm(instr_3, (offset & (kImm8Mask << 16)))); |
| 3817 assm->instr_at_put( |
| 3818 rinfo.pc() + 3 * Assembler::kInstrSize, |
| 3819 Assembler::PatchShiftImm(instr_4, (offset & (kImm8Mask << 24)))); |
| 3820 } |
| 3749 } else if (type == ConstantPoolArray::INT64) { | 3821 } else if (type == ConstantPoolArray::INT64) { |
| 3750 // Instruction to patch must be 'vldr rd, [pp, #0]'. | 3822 // Instruction to patch must be 'vldr rd, [pp, #0]'. |
| 3751 DCHECK((Assembler::IsVldrDPpImmediateOffset(instr) && | 3823 DCHECK((Assembler::IsVldrDPpImmediateOffset(instr) && |
| 3752 Assembler::GetVldrDRegisterImmediateOffset(instr) == 0)); | 3824 Assembler::GetVldrDRegisterImmediateOffset(instr) == 0)); |
| 3753 DCHECK(is_uint10(offset)); | 3825 DCHECK(is_uint10(offset)); |
| 3754 assm->instr_at_put(rinfo.pc(), Assembler::SetVldrDRegisterImmediateOffset( | 3826 assm->instr_at_put(rinfo.pc(), Assembler::SetVldrDRegisterImmediateOffset( |
| 3755 instr, offset)); | 3827 instr, offset)); |
| 3756 } else { | 3828 } else { |
| 3757 // Instruction to patch must be 'ldr rd, [pp, #0]'. | 3829 // Instruction to patch must be 'ldr rd, [pp, #0]'. |
| 3758 DCHECK((Assembler::IsLdrPpImmediateOffset(instr) && | 3830 DCHECK((Assembler::IsLdrPpImmediateOffset(instr) && |
| 3759 Assembler::GetLdrRegisterImmediateOffset(instr) == 0)); | 3831 Assembler::GetLdrRegisterImmediateOffset(instr) == 0)); |
| 3760 DCHECK(is_uint12(offset)); | 3832 DCHECK(is_uint12(offset)); |
| 3761 assm->instr_at_put( | 3833 assm->instr_at_put( |
| 3762 rinfo.pc(), Assembler::SetLdrRegisterImmediateOffset(instr, offset)); | 3834 rinfo.pc(), Assembler::SetLdrRegisterImmediateOffset(instr, offset)); |
| 3763 } | 3835 } |
| 3764 } | 3836 } |
| 3765 } | 3837 } |
| 3766 | 3838 |
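On the ARMv6 path the patcher rewrites only the shifter operand (bits 11..0) of each placeholder instruction, and every chunk it hands to PatchShiftImm is byte-aligned, so the fits_shifter DCHECK always holds. A hedged end-to-end sketch — with a simplified byte-chunk encoder, not the V8 API — that patches a four-word placeholder sequence and decodes it back:

```cpp
#include <cassert>
#include <cstdint>

// A byte at bit position 8*k encodes as imm8 rotated right by (32 - 8*k),
// i.e. rotate_imm = 16 - 4*k (and 0 for k == 0). kOff12Mask is assumed 0xfff.
static uint32_t PatchByteChunk(uint32_t instr, uint32_t offset, int k) {
  const uint32_t kOff12Mask = 0xfff;
  uint32_t imm8 = (offset >> (8 * k)) & 0xff;
  uint32_t rotate_imm = (k == 0) ? 0 : (16 - 4 * k);
  return (instr & ~kOff12Mask) | (rotate_imm << 8) | imm8;
}

int main() {
  uint32_t placeholder[4] = {
      0xE3A00000,  // mov r0, #0
      0xE3800000,  // orr r0, r0, #0
      0xE3800000,  // orr r0, r0, #0
      0xE3800000,  // orr r0, r0, #0
  };
  uint32_t offset = 0x12345678, value = 0;
  for (int i = 0; i < 4; i++) {
    placeholder[i] = PatchByteChunk(placeholder[i], offset, i);
    // Decode back: imm8 rotated right by 2 * rotate_imm.
    uint32_t rot = ((placeholder[i] >> 8) & 0xf) * 2;
    uint32_t imm8 = placeholder[i] & 0xff;
    value |= (imm8 >> rot) | (rot ? imm8 << (32 - rot) : 0);
  }
  assert(value == offset);  // the patched sequence materializes the offset
  return 0;
}
```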
| 3767 | 3839 |
| 3768 } } // namespace v8::internal | 3840 } } // namespace v8::internal |
| 3769 | 3841 |
| 3770 #endif // V8_TARGET_ARCH_ARM | 3842 #endif // V8_TARGET_ARCH_ARM |