| Index: src/arm/assembler-arm.cc |
| diff --git a/src/arm/assembler-arm.cc b/src/arm/assembler-arm.cc |
| index 9d61ab113bfe6e1161a9e959412a0f175b540305..845c9935c17c6bba144558f86f05f9e03c5aaafb 100644 |
| --- a/src/arm/assembler-arm.cc |
| +++ b/src/arm/assembler-arm.cc |
| @@ -293,15 +293,20 @@ const int RelocInfo::kApplyMask = 0; |
| bool RelocInfo::IsCodedSpecially() { |
| - // The deserializer needs to know whether a pointer is specially coded. Being |
| - // specially coded on ARM means that it is a movw/movt instruction. We don't |
| - // generate those yet. |
| - return false; |
| + // The deserializer needs to know whether a pointer is specially coded. Bein |

ulan (2014/03/18 12:27:51):
"Being"

rmcilroy (2014/03/18 15:14:35):
Done.

| + // specially coded on ARM means that it is a movw/movt instruction, or is an |
| + // out of line constant pool entry. These only occur if |
| + // FLAG_enable_ool_constant_pool is true. |
| + return FLAG_enable_ool_constant_pool; |
| } |
| bool RelocInfo::IsInConstantPool() { |
| - return Assembler::IsLdrPcImmediateOffset(Memory::int32_at(pc_)); |
| + if (FLAG_enable_ool_constant_pool) { |
| + return Assembler::IsLdrPpImmediateOffset(Memory::int32_at(pc_)); |
| + } else { |
| + return Assembler::IsLdrPcImmediateOffset(Memory::int32_at(pc_)); |
| + } |
| } |
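
For readers unfamiliar with the movw/movt encoding the new comment refers to: the pair materializes a 32-bit constant in two halves, movw writing the low 16 bits (zero-extending) and movt overwriting the high 16 bits. A small standalone illustration of the split, not part of the patch (move_32_bit_immediate below emits exactly this pair):

    #include <cstdint>
    #include <cstdio>

    int main() {
      uint32_t imm = 0xDEADBEEFu;
      uint32_t movw_imm16 = imm & 0xFFFFu;  // movw rd, #0xBEEF -> rd = 0x0000BEEF
      uint32_t movt_imm16 = imm >> 16;      // movt rd, #0xDEAD -> rd = 0xDEADBEEF
      std::printf("movw #%04X, movt #%04X\n",
                  static_cast<unsigned>(movw_imm16),
                  static_cast<unsigned>(movt_imm16));
      return 0;
    }
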
| @@ -480,9 +485,15 @@ const Instr kMovLrPc = al | MOV | kRegister_pc_Code | kRegister_lr_Code * B12; |
| // ldr rd, [pc, #offset] |
| const Instr kLdrPCMask = 15 * B24 | 7 * B20 | 15 * B16; |
| const Instr kLdrPCPattern = 5 * B24 | L | kRegister_pc_Code * B16; |
| +// ldr rd, [pp, #offset] |
| +const Instr kLdrPpMask = 15 * B24 | 7 * B20 | 15 * B16; |
| +const Instr kLdrPpPattern = 5 * B24 | L | kRegister_r8_Code * B16; |
| // vldr dd, [pc, #offset] |
| const Instr kVldrDPCMask = 15 * B24 | 3 * B20 | 15 * B16 | 15 * B8; |
| const Instr kVldrDPCPattern = 13 * B24 | L | kRegister_pc_Code * B16 | 11 * B8; |
| +// vldr dd, [pp, #offset] |
| +const Instr kVldrDPpMask = 15 * B24 | 3 * B20 | 15 * B16 | 15 * B8; |
| +const Instr kVldrDPpPattern = 13 * B24 | L | kRegister_r8_Code * B16 | 11 * B8; |
| // blxcc rm |
| const Instr kBlxRegMask = |
| 15 * B24 | 15 * B20 | 15 * B16 | 15 * B12 | 15 * B8 | 15 * B4; |
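
The new kLdrPp/kVldrDPp mask and pattern pairs classify instruction words the same way the existing pc-relative ones do: the mask keeps the opcode bits, the B/W/L group and the base-register field, and the pattern requires the base register to be r8 (the register the patch uses as pp, per kRegister_r8_Code above). A worked check against a hand-encoded instruction; the hex values below are computed from the definitions above and are illustrative only:

    #include <cassert>
    #include <cstdint>

    int main() {
      // 'ldr r0, [r8, #4]' with cond = AL; r8 is pp for the out-of-line pool.
      const uint32_t instr         = 0xE5980004u;
      const uint32_t kLdrPpMask    = 0x0F7F0000u;  // 15*B24 | 7*B20 | 15*B16
      const uint32_t kLdrPpPattern = 0x05180000u;  // 5*B24 | L | r8_code*B16
      // The condition, the offset sign (U bit), rd and the 12-bit offset are
      // masked out, so any pp-relative word load with any offset matches.
      assert((instr & kLdrPpMask) == kLdrPpPattern);
      return 0;
    }
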
| @@ -520,6 +531,7 @@ const Instr kLdrStrOffsetMask = 0x00000fff; |
| Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size) |
| : AssemblerBase(isolate, buffer, buffer_size), |
| recorded_ast_id_(TypeFeedbackId::None()), |
| + constant_pool_builder_(), |
| positions_recorder_(this) { |
| reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_); |
| num_pending_32_bit_reloc_info_ = 0; |
| @@ -542,17 +554,26 @@ Assembler::~Assembler() { |
| void Assembler::GetCode(CodeDesc* desc) { |
| - // Emit constant pool if necessary. |
| - CheckConstPool(true, false); |
| - ASSERT(num_pending_32_bit_reloc_info_ == 0); |
| - ASSERT(num_pending_64_bit_reloc_info_ == 0); |
| - |
| + if (!FLAG_enable_ool_constant_pool) { |
| + // Emit constant pool if necessary. |
| + CheckConstPool(true, false); |
| + ASSERT(num_pending_32_bit_reloc_info_ == 0); |
| + ASSERT(num_pending_64_bit_reloc_info_ == 0); |
| + } |
| // Set up code descriptor. |
| desc->buffer = buffer_; |
| desc->buffer_size = buffer_size_; |
| desc->instr_size = pc_offset(); |
| desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos(); |
| desc->origin = this; |
| + if (FLAG_enable_ool_constant_pool) { |
| + desc->constant_pool_64bit_count = constant_pool_builder_.count_of_64bit(); |
| + desc->constant_pool_code_ptr_count = |
| + constant_pool_builder_.count_of_code_ptr(); |
| + desc->constant_pool_heap_ptr_count = |
| + constant_pool_builder_.count_of_heap_ptr(); |
| + desc->constant_pool_32bit_count = constant_pool_builder_.count_of_32bit(); |
| + } |
| } |
| @@ -729,6 +750,13 @@ bool Assembler::IsLdrPcImmediateOffset(Instr instr) { |
| } |
| +bool Assembler::IsLdrPpImmediateOffset(Instr instr) { |
| + // Check the instruction is indeed a |
| + // ldr<cond> <Rd>, [pp +/- offset_12]. |
| + return (instr & kLdrPpMask) == kLdrPpPattern; |
| +} |
| + |
| + |
| bool Assembler::IsVldrDPcImmediateOffset(Instr instr) { |
| // Check the instruction is indeed a |
| // vldr<cond> <Dd>, [pc +/- offset_10]. |
| @@ -736,6 +764,13 @@ bool Assembler::IsVldrDPcImmediateOffset(Instr instr) { |
| } |
| +bool Assembler::IsVldrDPpImmediateOffset(Instr instr) { |
| + // Check the instruction is indeed a |
| + // vldr<cond> <Dd>, [pp +/- offset_10]. |
| + return (instr & kVldrDPpMask) == kVldrDPpPattern; |
| +} |
| + |
| + |
| bool Assembler::IsTstImmediate(Instr instr) { |
| return (instr & (B27 | B26 | I | kOpCodeMask | S | kRdMask)) == |
| (I | TST | S); |
| @@ -1063,7 +1098,10 @@ bool Operand::must_output_reloc_info(const Assembler* assembler) const { |
| static bool use_mov_immediate_load(const Operand& x, |
| const Assembler* assembler) { |
| - if (CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS) && |
| + if (assembler != NULL && !assembler->can_use_constant_pool()) { |
| + // If there is no constant pool available, we must use an mov immediate. |

ulan (2014/03/18 12:27:51):
Maybe ASSERT(CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS))?

rmcilroy (2014/03/18 15:14:35):
What we actually want is IsSupported(ARMv7); MOVW/MOVT are only available from ARMv7.

| + return true; |
| + } else if (CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS) && |
| (assembler == NULL || !assembler->predictable_code_size())) { |
| // Prefer movw / movt to constant pool if it is more efficient on the CPU. |
| return true; |
| @@ -1106,22 +1144,30 @@ bool Operand::is_single_instruction(const Assembler* assembler, |
| void Assembler::move_32_bit_immediate(Register rd, |
| const Operand& x, |
| Condition cond) { |
| - if (rd.code() != pc.code()) { |
| - if (use_mov_immediate_load(x, this)) { |
| - if (x.must_output_reloc_info(this)) { |
| - RecordRelocInfo(x.rmode_, x.imm32_, DONT_USE_CONSTANT_POOL); |
| - // Make sure the movw/movt doesn't get separated. |
| - BlockConstPoolFor(2); |
| - } |
| - emit(cond | 0x30*B20 | rd.code()*B12 | |
| - EncodeMovwImmediate(x.imm32_ & 0xffff)); |
| - movt(rd, static_cast<uint32_t>(x.imm32_) >> 16, cond); |
| - return; |
| + RelocInfo rinfo(pc_, x.rmode_, x.imm32_, NULL); |
| + if (x.must_output_reloc_info(this)) { |
| + RecordRelocInfo(rinfo); |
| + } |
| + |
| + if (use_mov_immediate_load(x, this)) { |
| + Register target = rd.code() == pc.code() ? ip : rd; |
| + // TODO(rmcilroy): add ARMv6 support for immediate loads. |
| + ASSERT(CpuFeatures::IsSupported(ARMv7)); |
| + if (!FLAG_enable_ool_constant_pool && x.must_output_reloc_info(this)) { |
| + // Make sure the movw/movt doesn't get separated. |
| + BlockConstPoolFor(2); |
| } |
| + emit(cond | 0x30*B20 | target.code()*B12 | |
| + EncodeMovwImmediate(x.imm32_ & 0xffff)); |
| + movt(target, static_cast<uint32_t>(x.imm32_) >> 16, cond); |
| + if (target.code() != rd.code()) { |
| + mov(rd, target, LeaveCC, cond); |
| + } |
| + } else { |
| + ASSERT(can_use_constant_pool()); |
| + ConstantPoolAddEntry(rinfo); |
| + ldr(rd, MemOperand(FLAG_enable_ool_constant_pool ? pp : pc, 0), cond); |
| } |
| - |
| - RecordRelocInfo(x.rmode_, x.imm32_, USE_CONSTANT_POOL); |
| - ldr(rd, MemOperand(pc, 0), cond); |
| } |
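
Summarizing the branch structure move_32_bit_immediate now has, as a standalone sketch (the function name and enum are illustrative, not V8 API): a movw/movt load is forced when no constant pool is available, otherwise the constant goes into a pool and is loaded relative to pp (out-of-line pool) or pc (classic in-line pool), with the offset patched later.

    // Illustrative only; mirrors the decision made in the hunk above.
    enum class ImmLoadKind { kMovwMovt, kLdrFromPc, kLdrFromPp };

    ImmLoadKind ChooseImmediateLoad(bool can_use_constant_pool,
                                    bool prefer_movw_movt,
                                    bool ool_constant_pool_enabled) {
      if (!can_use_constant_pool || prefer_movw_movt) {
        return ImmLoadKind::kMovwMovt;       // movw rd, #lo16 ; movt rd, #hi16
      }
      return ool_constant_pool_enabled
                 ? ImmLoadKind::kLdrFromPp   // ldr rd, [pp, #0], patched later
                 : ImmLoadKind::kLdrFromPc;  // ldr rd, [pc, #0], patched later
    }
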
| @@ -2421,7 +2467,7 @@ void Assembler::vmov(const DwVfpRegister dst, |
| int vd, d; |
| dst.split_code(&vd, &d); |
| emit(al | 0x1D*B23 | d*B22 | 0x3*B20 | vd*B12 | 0x5*B9 | B8 | enc); |
| - } else if (FLAG_enable_vldr_imm) { |
| + } else if (FLAG_enable_vldr_imm && can_use_constant_pool()) { |
| // TODO(jfb) Temporarily turned off until we have constant blinding or |
| // some equivalent mitigation: an attacker can otherwise control |
| // generated data which also happens to be executable, a Very Bad |
| @@ -2437,8 +2483,9 @@ void Assembler::vmov(const DwVfpRegister dst, |
| // The code could also randomize the order of values, though |
| // that's tricky because vldr has a limited reach. Furthermore |
| // it breaks load locality. |
| - RecordRelocInfo(imm); |
| - vldr(dst, MemOperand(pc, 0)); |
| + RelocInfo rinfo(pc_, imm); |
| + ConstantPoolAddEntry(rinfo); |
| + vldr(dst, MemOperand(FLAG_enable_ool_constant_pool ? pp : pc, 0)); |
| } else { |
| // Synthesise the double from ARM immediates. |
| uint32_t lo, hi; |
| @@ -3168,6 +3215,7 @@ void Assembler::GrowBuffer() { |
| ASSERT(rinfo.rmode() == RelocInfo::NONE64); |
| rinfo.set_pc(rinfo.pc() + pc_delta); |
| } |
| + constant_pool_builder_.Relocate(pc_delta); |
| } |
| @@ -3203,28 +3251,16 @@ void Assembler::emit_code_stub_address(Code* stub) { |
| } |
| -void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data, |
| - UseConstantPoolMode mode) { |
| - // We do not try to reuse pool constants. |
| +void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) { |
| RelocInfo rinfo(pc_, rmode, data, NULL); |
| - if (((rmode >= RelocInfo::JS_RETURN) && |
| - (rmode <= RelocInfo::DEBUG_BREAK_SLOT)) || |
| - (rmode == RelocInfo::CONST_POOL) || |
| - mode == DONT_USE_CONSTANT_POOL) { |
| - // Adjust code for new modes. |
| - ASSERT(RelocInfo::IsDebugBreakSlot(rmode) |
| - || RelocInfo::IsJSReturn(rmode) |
| - || RelocInfo::IsComment(rmode) |
| - || RelocInfo::IsPosition(rmode) |
| - || RelocInfo::IsConstPool(rmode) |
| - || mode == DONT_USE_CONSTANT_POOL); |
| - // These modes do not need an entry in the constant pool. |
| - } else { |
| - RecordRelocInfoConstantPoolEntryHelper(rinfo); |
| - } |
| + RecordRelocInfo(rinfo); |
| +} |
| + |
| + |
| +void Assembler::RecordRelocInfo(const RelocInfo& rinfo) { |
| if (!RelocInfo::IsNone(rinfo.rmode())) { |
| // Don't record external references unless the heap will be serialized. |
| - if (rmode == RelocInfo::EXTERNAL_REFERENCE) { |
| + if (rinfo.rmode() == RelocInfo::EXTERNAL_REFERENCE) { |
| #ifdef DEBUG |
| if (!Serializer::enabled()) { |
| Serializer::TooLateToEnableNow(); |
| @@ -3235,9 +3271,9 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data, |
| } |
| } |
| ASSERT(buffer_space() >= kMaxRelocSize); // too late to grow buffer here |
| - if (rmode == RelocInfo::CODE_TARGET_WITH_ID) { |
| - RelocInfo reloc_info_with_ast_id(pc_, |
| - rmode, |
| + if (rinfo.rmode() == RelocInfo::CODE_TARGET_WITH_ID) { |
| + RelocInfo reloc_info_with_ast_id(rinfo.pc(), |
| + rinfo.rmode(), |
| RecordedAstId().ToInt(), |
| NULL); |
| ClearRecordedAstId(); |
| @@ -3249,34 +3285,33 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data, |
| } |
| -void Assembler::RecordRelocInfo(double data) { |
| - // We do not try to reuse pool constants. |
| - RelocInfo rinfo(pc_, data); |
| - RecordRelocInfoConstantPoolEntryHelper(rinfo); |
| -} |
| - |
| - |
| -void Assembler::RecordRelocInfoConstantPoolEntryHelper(const RelocInfo& rinfo) { |
| - if (rinfo.rmode() == RelocInfo::NONE64) { |
| - ASSERT(num_pending_64_bit_reloc_info_ < kMaxNumPending64RelocInfo); |
| - if (num_pending_64_bit_reloc_info_ == 0) { |
| - first_const_pool_64_use_ = pc_offset(); |
| - } |
| - pending_64_bit_reloc_info_[num_pending_64_bit_reloc_info_++] = rinfo; |
| +void Assembler::ConstantPoolAddEntry(const RelocInfo& rinfo) { |
| + if (FLAG_enable_ool_constant_pool) { |
| + constant_pool_builder_.AddEntry(this, rinfo); |
| } else { |
| - ASSERT(num_pending_32_bit_reloc_info_ < kMaxNumPending32RelocInfo); |
| - if (num_pending_32_bit_reloc_info_ == 0) { |
| - first_const_pool_32_use_ = pc_offset(); |
| + if (rinfo.rmode() == RelocInfo::NONE64) { |
| + ASSERT(num_pending_64_bit_reloc_info_ < kMaxNumPending64RelocInfo); |
| + if (num_pending_64_bit_reloc_info_ == 0) { |
| + first_const_pool_64_use_ = pc_offset(); |
| + } |
| + pending_64_bit_reloc_info_[num_pending_64_bit_reloc_info_++] = rinfo; |
| + } else { |
| + ASSERT(num_pending_32_bit_reloc_info_ < kMaxNumPending32RelocInfo); |
| + if (num_pending_32_bit_reloc_info_ == 0) { |
| + first_const_pool_32_use_ = pc_offset(); |
| + } |
| + pending_32_bit_reloc_info_[num_pending_32_bit_reloc_info_++] = rinfo; |
| } |
| - pending_32_bit_reloc_info_[num_pending_32_bit_reloc_info_++] = rinfo; |
| + // Make sure the constant pool is not emitted in place of the next |
| + // instruction for which we just recorded relocation info. |
| + BlockConstPoolFor(1); |
| } |
| - // Make sure the constant pool is not emitted in place of the next |
| - // instruction for which we just recorded relocation info. |
| - BlockConstPoolFor(1); |
| } |
| void Assembler::BlockConstPoolFor(int instructions) { |
| + if (FLAG_enable_ool_constant_pool) return; |

ulan (2014/03/18 12:27:51):
If FLAG_enable_ool_constant_pool then this function should just return early.

rmcilroy (2014/03/18 15:14:35):
Done.

| + |
| int pc_limit = pc_offset() + instructions * kInstrSize; |
| if (no_const_pool_before_ < pc_limit) { |
| // Max pool start (if we need a jump and an alignment). |
| @@ -3298,6 +3333,8 @@ void Assembler::BlockConstPoolFor(int instructions) { |
| void Assembler::CheckConstPool(bool force_emit, bool require_jump) { |
| + if (FLAG_enable_ool_constant_pool) return; |

ulan (2014/03/18 12:27:51):
Same as above.

rmcilroy (2014/03/18 15:14:35):
Done.

| + |
| // Some short sequences of instructions mustn't be broken up by constant pool |
| // emission; such sequences are protected by calls to BlockConstPoolFor and |
| // BlockConstPoolScope. |
| @@ -3495,6 +3532,203 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) { |
| } |
| +void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) { |
| + ASSERT(FLAG_enable_ool_constant_pool); |
| + constant_pool_builder_.Populate(this, constant_pool); |
| +} |
| + |
| + |
| +ConstantPoolBuilder::ConstantPoolBuilder() |
| + : entries_(new RelocInfo[32]), |
| + merged_indexes_(new int[32]), |
| + buffer_size_(32), |
| + number_of_entries_(0), |
| + count_of_64bit_(0), |
| + count_of_code_ptr_(0), |
| + count_of_heap_ptr_(0), |
| + count_of_32bit_(0) { } |
| + |
| + |
| +ConstantPoolBuilder::~ConstantPoolBuilder() { |
| + delete[] entries_; |
| + delete[] merged_indexes_; |
| +} |
| + |
| + |
| +bool ConstantPoolBuilder::IsEmpty() { |
| + return number_of_entries_ == 0; |
| +} |
| + |
| + |
| +bool ConstantPoolBuilder::Is64BitEntry(RelocInfo::Mode rmode) { |
| + return rmode == RelocInfo::NONE64; |
| +} |
| + |
| + |
| +bool ConstantPoolBuilder::Is32BitEntry(RelocInfo::Mode rmode) { |
| + return !RelocInfo::IsGCRelocMode(rmode) && rmode != RelocInfo::NONE64; |
| +} |
| + |
| + |
| +bool ConstantPoolBuilder::IsCodePtrEntry(RelocInfo::Mode rmode) { |
| + return RelocInfo::IsCodeTarget(rmode); |
| +} |
| + |
| + |
| +bool ConstantPoolBuilder::IsHeapPtrEntry(RelocInfo::Mode rmode) { |
| + return RelocInfo::IsGCRelocMode(rmode) && !RelocInfo::IsCodeTarget(rmode); |
| +} |
| + |
| + |
| +void ConstantPoolBuilder::AddEntry(Assembler* assm, |
| + const RelocInfo& rinfo) { |
| + RelocInfo::Mode rmode = rinfo.rmode(); |
| + ASSERT(rmode != RelocInfo::COMMENT && |
| + rmode != RelocInfo::POSITION && |
| + rmode != RelocInfo::STATEMENT_POSITION && |
| + rmode != RelocInfo::CONST_POOL); |
| + |
| + if (number_of_entries_ >= buffer_size_) { |
| + GrowBuffer(); |
| + } |
| + int entry_index = number_of_entries_++; |
| + entries_[entry_index] = rinfo; |
| + merged_indexes_[entry_index] = -1; |
| + |
| + // Try to merge entries which won't be patched. |
| + if (RelocInfo::IsNone(rmode) || |
| + (!Serializer::enabled() && (rmode >= RelocInfo::CELL))) { |
| + for (int i = 0; i < entry_index; i++) { |
| + if (RelocInfo::IsEqual(rinfo, entries_[i])) { |
| + merged_indexes_[entry_index] = i; |
| + break; |
| + } |
| + } |
| + } |
| + |
| + if (merged_indexes_[entry_index] == -1) { |
| + // Not merged, so update the appropriate count. |
| + if (Is64BitEntry(rmode)) { |
| + count_of_64bit_++; |
| + } else if (Is32BitEntry(rmode)) { |
| + count_of_32bit_++; |
| + } else if (IsCodePtrEntry(rmode)) { |
| + count_of_code_ptr_++; |
| + } else { |
| + ASSERT(IsHeapPtrEntry(rmode)); |
| + count_of_heap_ptr_++; |
| + } |
| + } |
| + |
| + // Check if we still have room for another entry given Arm's ldr and vldr |
| + // immediate offset range. |
| + if (!(is_uint12(ConstantPoolArray::SizeFor(count_of_64bit_, |
| + count_of_code_ptr_, |
| + count_of_heap_ptr_, |
| + count_of_32bit_)) && |
| + is_uint10(ConstantPoolArray::SizeFor(count_of_64bit_, 0, 0, 0)))) { |
| + assm->set_constant_pool_full(); |
| + } |
| +} |
| + |
| + |
| +void ConstantPoolBuilder::GrowBuffer() { |
| + int new_buffer_size = buffer_size_ * 2; |

ulan (2014/03/18 12:27:51):
Since we already depend on STL, I'd suggest using std::vector here.

rmcilroy (2014/03/18 15:14:35):
Done.

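
A rough sketch of what the std::vector version presumably looks like; this is an assumption about the follow-up, not part of this patch set. The manually grown arrays become vectors, so GrowBuffer() and the delete[] calls in the destructor go away, and AddEntry simply appends:

    #include <vector>

    struct RelocInfo { /* stand-in for V8's existing class */ };

    // Sketch only: std::vector manages growth instead of GrowBuffer().
    class ConstantPoolBuilder {
     public:
      void AddEntry(const RelocInfo& rinfo) {
        entries_.push_back(rinfo);
        merged_indexes_.push_back(-1);  // -1 still means "not merged".
      }

     private:
      std::vector<RelocInfo> entries_;
      std::vector<int> merged_indexes_;
    };

Relocate() and Populate() would then iterate up to entries_.size() rather than number_of_entries_.
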
| + |
| + RelocInfo* new_entries = new RelocInfo[new_buffer_size]; |
| + OS::MemMove(new_entries, entries_, sizeof(RelocInfo) * buffer_size_); |
| + delete[] entries_; |
| + entries_ = new_entries; |
| + |
| + int* new_merged_indexes = new int[new_buffer_size]; |
| + OS::MemMove(new_merged_indexes, merged_indexes_, |
| + sizeof(merged_indexes_[0]) * buffer_size_); |
| + delete[] merged_indexes_; |
| + merged_indexes_ = new_merged_indexes; |
| + |
| + buffer_size_ = new_buffer_size; |
| +} |
| + |
| + |
| +void ConstantPoolBuilder::Relocate(int pc_delta) { |
| + for (int i = 0; i < number_of_entries_; i++) { |
| + RelocInfo& rinfo = entries_[i]; |
| + ASSERT(rinfo.rmode() != RelocInfo::JS_RETURN); |
| + rinfo.set_pc(rinfo.pc() + pc_delta); |
| + } |
| +} |
| + |
| + |
| +void ConstantPoolBuilder::Populate(Assembler* assm, |
| + ConstantPoolArray* constant_pool) { |
| + ASSERT(constant_pool->count_of_int64_entries() == count_of_64bit_); |
| + ASSERT(constant_pool->count_of_code_ptr_entries() == count_of_code_ptr_); |
| + ASSERT(constant_pool->count_of_heap_ptr_entries() == count_of_heap_ptr_); |
| + ASSERT(constant_pool->count_of_int32_entries() == count_of_32bit_); |
| + |
| + int index_64bit = 0; |
| + int index_code_ptr = count_of_64bit_; |
| + int index_heap_ptr = count_of_64bit_ + count_of_code_ptr_; |
| + int index_32bit = count_of_64bit_ + count_of_code_ptr_ + count_of_heap_ptr_; |
| + |
| + for (int i = 0; i < number_of_entries_; i++) { |
| + RelocInfo& rinfo = entries_[i]; |
| + RelocInfo::Mode rmode = rinfo.rmode(); |
| + |
| + // Update constant pool if necessary and get the entry's offset. |
| + int offset; |
| + if (merged_indexes_[i] == -1) { |
| + if (Is64BitEntry(rmode)) { |
| + offset = constant_pool->OffsetOfElementAt(index_64bit) - kHeapObjectTag; |
| + constant_pool->set(index_64bit++, rinfo.data64()); |
| + } else if (Is32BitEntry(rmode)) { |
| + offset = constant_pool->OffsetOfElementAt(index_32bit) - kHeapObjectTag; |
| + constant_pool->set(index_32bit++, static_cast<int32_t>(rinfo.data())); |
| + } else if (IsCodePtrEntry(rmode)) { |
| + offset = constant_pool->OffsetOfElementAt(index_code_ptr) - |
| + kHeapObjectTag; |
| + constant_pool->set(index_code_ptr++, |
| + reinterpret_cast<Object *>(rinfo.data())); |
| + } else { |
| + ASSERT(IsHeapPtrEntry(rmode)); |
| + offset = constant_pool->OffsetOfElementAt(index_heap_ptr) - |
| + kHeapObjectTag; |
| + constant_pool->set(index_heap_ptr++, |
| + reinterpret_cast<Object *>(rinfo.data())); |
| + } |
| + merged_indexes_[i] = offset; // Stash offset for merged entries. |
| + } else { |
| + int merged_index = merged_indexes_[i]; |
| + ASSERT(merged_index < number_of_entries_ && merged_index < i); |
| + offset = merged_indexes_[merged_index]; |
| + } |
| + |
| + // Patch vldr/ldr instruction with correct offset. |
| + Instr instr = assm->instr_at(rinfo.pc()); |
| + if (Is64BitEntry(rmode)) { |
| + // Instruction to patch must be 'vldr rd, [pp, #0]'. |
| + ASSERT((Assembler::IsVldrDPpImmediateOffset(instr) && |
| + Assembler::GetVldrDRegisterImmediateOffset(instr) == 0)); |
| + ASSERT(is_uint10(offset)); |
| + assm->instr_at_put(rinfo.pc(), |
| + Assembler::SetVldrDRegisterImmediateOffset(instr, offset)); |
| + } else { |
| + // Instruction to patch must be 'ldr rd, [pp, #0]'. |
| + ASSERT((Assembler::IsLdrPpImmediateOffset(instr) && |
| + Assembler::GetLdrRegisterImmediateOffset(instr) == 0)); |
| + ASSERT(is_uint12(offset)); |
| + assm->instr_at_put(rinfo.pc(), |
| + Assembler::SetLdrRegisterImmediateOffset(instr, offset)); |
| + } |
| + } |
| + |
| + ASSERT((index_64bit == count_of_64bit_) && |
| + (index_code_ptr == (index_64bit + count_of_code_ptr_)) && |
| + (index_heap_ptr == (index_code_ptr + count_of_heap_ptr_)) && |
| + (index_32bit == (index_heap_ptr + count_of_32bit_))); |
| +} |
| + |
| + |
| } } // namespace v8::internal |
| #endif // V8_TARGET_ARCH_ARM |
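
One detail of Populate() worth spelling out: the offsets written into the patched ldr/vldr instructions subtract kHeapObjectTag because pp holds a tagged pointer to the ConstantPoolArray, so the tag has to be folded into the load offset for the untagged element address to come out right. A tiny arithmetic check; the addresses and offsets are assumed example values, not taken from the patch:

    #include <cassert>

    int main() {
      const int kHeapObjectTag = 1;             // V8 tags heap pointers with 1.
      int array_address  = 0x1000;              // untagged ConstantPoolArray start
      int pp             = array_address + kHeapObjectTag;  // tagged pointer in pp
      int element_offset = 24;                   // OffsetOfElementAt(i)
      int load_offset    = element_offset - kHeapObjectTag;  // what Populate stores
      assert(pp + load_offset == array_address + element_offset);
      return 0;
    }
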