| Index: src/arm64/assembler-arm64.cc |
| diff --git a/src/arm64/assembler-arm64.cc b/src/arm64/assembler-arm64.cc |
| index 90cff59620e316061ad5513e176c5fd5cf8e2621..1f99bb47d6e8fa2041a270b21eaa16733edfc153 100644 |
| --- a/src/arm64/assembler-arm64.cc |
| +++ b/src/arm64/assembler-arm64.cc |
| @@ -296,10 +296,218 @@ bool Operand::NeedsRelocation(const Assembler* assembler) const { |
| } |
| -// Assembler |
| +// Constant Pool. |
| +void ConstPool::RecordEntry(intptr_t data, |
| + RelocInfo::Mode mode) { |
| + ASSERT(mode != RelocInfo::COMMENT && |
| + mode != RelocInfo::POSITION && |
| + mode != RelocInfo::STATEMENT_POSITION && |
| + mode != RelocInfo::CONST_POOL && |
| + mode != RelocInfo::VENEER_POOL && |
| + mode != RelocInfo::CODE_AGE_SEQUENCE); |
| + |
| + uint64_t raw_data = static_cast<uint64_t>(data); |
| + int offset = assm_->pc_offset(); |
| + if (IsEmpty()) { |
| + first_use_ = offset; |
| + } |
| + |
| + std::pair<uint64_t, int> entry = std::make_pair(raw_data, offset); |
| + if (CanBeShared(mode)) { |
| + shared_entries_.insert(entry); |
| + if (shared_entries_.count(entry.first) == 1) { |
| + shared_entries_count++; |
| + } |
| + } else { |
| + unique_entries_.push_back(entry); |
| + } |
| +} |
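
The bookkeeping above is worth seeing in isolation: shared entries go into a multimap keyed on the raw constant, one element per use (so every load site can be patched later), while shared_entries_count only grows the first time a value is seen, i.e. one future pool slot per distinct constant. A minimal standalone sketch of that counting logic (the record lambda and the sample values are illustrative, not from the CL):

    #include <cstdint>
    #include <iostream>
    #include <map>
    #include <utility>

    int main() {
      // Models ConstPool's shared-entry bookkeeping: the multimap keeps one
      // element per use, while the counter tracks distinct constants
      // (one pool slot per distinct value).
      std::multimap<uint64_t, int> shared_entries;
      int shared_entries_count = 0;

      auto record = [&](uint64_t data, int pc_offset) {
        shared_entries.insert(std::make_pair(data, pc_offset));
        if (shared_entries.count(data) == 1) {  // first use of this value
          shared_entries_count++;
        }
      };

      record(0x1234, 0);   // new value -> one pool slot
      record(0x1234, 8);   // duplicate -> shares that slot
      record(0xabcd, 16);  // new value -> a second slot

      std::cout << "uses: " << shared_entries.size()             // 3
                << ", slots: " << shared_entries_count << "\n";  // 2
    }
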
| + |
| + |
| +int ConstPool::DistanceToFirstUse() { |
| + ASSERT(first_use_ >= 0); |
| + return assm_->pc_offset() - first_use_; |
| +} |
| + |
| + |
| +int ConstPool::MaxPcOffset() { |
| + // There are no pending entries in the pool so we can never get out of |
| + // range. |
| + if (IsEmpty()) return kMaxInt; |
| + |
| + // Entries are not necessarily emitted in the order they are recorded, so |
| + // in the worst case the instruction at first_use_ ends up loading the |
| + // entry that is emitted last, at the far end of the pool. |
|
rmcilroy
2014/06/17 13:09:56
nit - this comment was a bit unclear to me first (
Rodolph Perfetta (ARM)
2014/06/18 16:53:22
Done.
|
| + return first_use_ + kMaxLoadLiteralRange - WorstCaseSize(); |
| +} |
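
A worked example of the range math in MaxPcOffset, under the assumption that kMaxLoadLiteralRange is the 1 MB forward reach of an ldr-literal (the constant's value is not shown in this hunk, so treat the number as illustrative):

    #include <iostream>

    int main() {
      // Illustrative numbers: a 1 MB literal-load range and 10 pending
      // 64-bit entries; only the shape of the computation matters.
      const int kInstructionSize = 4;
      const int kPointerSize = 8;
      const int kMaxLoadLiteralRange = 1024 * 1024;
      int first_use = 4000;   // pc offset of the first ldr targeting the pool
      int entry_count = 10;

      // Worst-case pool size, as in ConstPool::WorstCaseSize(): a four
      // instruction prologue plus one 64-bit slot per entry.
      int worst_case_size = 4 * kInstructionSize + entry_count * kPointerSize;

      // The first ldr must still reach the far end of the pool even if its
      // entry is emitted last, hence the subtraction.
      int max_pc_offset = first_use + kMaxLoadLiteralRange - worst_case_size;
      std::cout << "emit the pool before pc offset " << max_pc_offset << "\n";
      // 4000 + 1048576 - 96 = 1052480
    }
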
| + |
| + |
| +int ConstPool::WorstCaseSize() { |
| + if (IsEmpty()) return 0; |
| + |
| + // Max size prologue: |
| + // b over |
| + // ldr xzr, #pool_size |
| + // blr xzr |
| + // nop |
| + // All entries are 64-bit for now. |
| + return 4 * kInstructionSize + EntryCount() * kPointerSize; |
| +} |
| + |
| + |
| +int ConstPool::SizeIfEmittedAtCurrentPc(bool require_jump) { |
| + if (IsEmpty()) return 0; |
| + |
| + // Prologue is: |
| + // b over ;; if require_jump |
| + // ldr xzr, #pool_size |
| + // blr xzr |
| + // nop ;; if not 64-bit aligned |
| + int prologue_size = require_jump ? kInstructionSize : 0; |
| + prologue_size += 2 * kInstructionSize; |
| + prologue_size += IsAligned(assm_->pc_offset() + prologue_size, 8) ? |
| + 0 : kInstructionSize; |
| + |
| + // All entries are 64-bit for now. |
| + return prologue_size + EntryCount() * kPointerSize; |
| +} |
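
The only variable part of this size is the alignment nop, which depends on the pc at emission time. A self-contained sketch of the same computation (constants assumed as in the previous example):

    #include <iostream>

    // Mirrors ConstPool::SizeIfEmittedAtCurrentPc(): the optional branch, the
    // marker ldr, the guard blr, and a nop only if the entries would otherwise
    // not start on a 64-bit boundary.
    int SizeIfEmittedAt(int pc_offset, int entry_count, bool require_jump) {
      const int kInstructionSize = 4;
      const int kPointerSize = 8;
      int prologue_size = require_jump ? kInstructionSize : 0;
      prologue_size += 2 * kInstructionSize;   // marker ldr + guard blr
      if ((pc_offset + prologue_size) % 8 != 0) {
        prologue_size += kInstructionSize;     // alignment nop
      }
      return prologue_size + entry_count * kPointerSize;
    }

    int main() {
      // With a jump, the three prologue instructions (12 bytes) leave an
      // 8-byte-aligned pc misaligned, so a nop is added: 16 + 2 * 8 = 32.
      std::cout << SizeIfEmittedAt(/*pc_offset=*/64, 2, true) << "\n";   // 32
      // Without the jump, 64 + 8 is aligned, so no nop: 8 + 16 = 24.
      std::cout << SizeIfEmittedAt(/*pc_offset=*/64, 2, false) << "\n";  // 24
    }
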
| + |
| + |
| +void ConstPool::Emit(bool require_jump) { |
| + ASSERT(assm_->is_const_pool_blocked()); |
|
rmcilroy
2014/06/17 13:09:57
This is a bit counter intuitive - maybe have the B
Rodolph Perfetta (ARM)
2014/06/18 16:53:23
Done.
|
| + |
| + int size = SizeIfEmittedAtCurrentPc(require_jump); |
| + Label size_check; |
| + assm_->bind(&size_check); |
| + |
| + assm_->RecordConstPool(size); |
| + // Emit the constant pool. It is preceded by an optional branch if |
| + // require_jump and a header which will: |
| + // 1) Encode the size of the constant pool, for use by the disassembler. |
| + // 2) Terminate the program, to try to prevent execution from accidentally |
| + // flowing into the constant pool. |
| + // 3) Align the pool entries to a 64-bit boundary. |
| + // The header is therefore made of up to three arm64 instructions: |
| + // ldr xzr, #<size of the constant pool in 32-bit words> |
| + // blr xzr |
| + // nop |
| + // |
| + // If executed, the header will likely segfault and lr will point to the |
| + // instruction following the offending blr. |
| + // TODO(all): Make the alignment part less fragile. Currently code is |
| + // allocated as a byte array so there are no guarantees the alignment will |
| + // be preserved on compaction. It works for now because allocations appear |
| + // to be 64-bit aligned. |
| + |
| + // Emit branch if required |
| + Label after_pool; |
| + if (require_jump) { |
| + assm_->b(&after_pool); |
| + } |
| + |
| + // Emit the header. |
| + assm_->RecordComment("[ Constant Pool"); |
| + EmitMarker(); |
| + EmitGuard(); |
| + assm_->Align(8); |
| + |
| + // Emit constant pool entries. |
| + // TODO(all): currently each relocated constant is 64 bits, consider adding |
| + // support for 32-bit entries. |
| + EmitEntries(); |
| + assm_->RecordComment("]"); |
| + |
| + if (after_pool.is_linked()) { |
| + assm_->bind(&after_pool); |
| + } |
| + |
| + ASSERT(assm_->SizeOfCodeGeneratedSince(&size_check) == |
| + static_cast<unsigned>(size)); |
| +} |
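
The size_check label used above is a self-verification idiom: predict the emission size, emit, then assert exactly that many bytes were produced. A toy standalone analogue (ToyAssembler is invented for illustration; only the idiom carries over):

    #include <cassert>
    #include <cstdint>
    #include <vector>

    // A toy "assembler" that only tracks its pc, to show the size_check
    // idiom: predict the emission size, emit, assert the prediction held.
    struct ToyAssembler {
      std::vector<uint32_t> code;
      int pc_offset() const { return static_cast<int>(code.size()) * 4; }
      void emit(uint32_t instr) { code.push_back(instr); }
    };

    int main() {
      ToyAssembler assm;
      int expected_size = 3 * 4;           // we plan to emit three instructions
      int size_check = assm.pc_offset();   // plays the role of bind(&size_check)

      assm.emit(0xd503201f);  // nop
      assm.emit(0xd503201f);  // nop
      assm.emit(0xd503201f);  // nop

      // Mirrors the ASSERT on SizeOfCodeGeneratedSince(&size_check).
      assert(assm.pc_offset() - size_check == expected_size);
      return 0;
    }
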
| + |
| + |
| +void ConstPool::Clear() { |
| + shared_entries_.clear(); |
| + shared_entries_count = 0; |
| + unique_entries_.clear(); |
| + first_use_ = -1; |
| +} |
| + |
| + |
| +bool ConstPool::CanBeShared(RelocInfo::Mode mode) { |
| + // Constant pool currently does not support 32-bit entries. |
| + ASSERT(mode != RelocInfo::NONE32); |
| + |
| + return RelocInfo::IsNone(mode) || |
| + (!assm_->serializer_enabled() && (mode >= RelocInfo::CELL)); |
| +} |
| + |
| + |
| +void ConstPool::EmitMarker() { |
| + // A constant pool size is expressed as a number of 32-bit words. |
| + // Currently all entries are 64-bit. |
| + // + 1 is for the crash guard. |
| + // + 0/1 for alignment. |
| + int word_count = EntryCount() * 2 + 1 + |
| + (IsAligned(assm_->pc_offset(), 8) ? 0 : 1); |
| + assm_->Emit(LDR_x_lit | |
| + Assembler::ImmLLiteral(word_count) | |
| + Assembler::Rt(xzr)); |
| +} |
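
A worked example of the marker payload (MarkerWordCount is a hypothetical helper mirroring the word_count computation above):

    #include <iostream>

    // Mirrors EmitMarker()'s payload: pool size in 32-bit words, counting two
    // words per 64-bit entry, one word for the blr guard, and one more when
    // an alignment nop will be emitted.
    int MarkerWordCount(int entry_count, int pc_offset) {
      return entry_count * 2 + 1 + ((pc_offset % 8 == 0) ? 0 : 1);
    }

    int main() {
      std::cout << MarkerWordCount(3, 100) << "\n";  // misaligned: 3*2+1+1 = 8
      std::cout << MarkerWordCount(3, 96) << "\n";   // aligned:    3*2+1   = 7
    }
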
| + |
| + |
| +void ConstPool::EmitGuard() { |
| +#ifdef DEBUG |
| + Instruction* instr = reinterpret_cast<Instruction*>(assm_->pc()); |
| + ASSERT(instr->preceding()->IsLdrLiteralX() && |
| + instr->preceding()->Rt() == xzr.code()); |
| +#endif |
| + assm_->EmitPoolGuard(); |
| +} |
| + |
| + |
| +void ConstPool::EmitEntries() { |
| + ASSERT(IsAligned(assm_->pc_offset(), 8)); |
| + |
| + // Emit shared entries. |
| + while (!shared_entries_.empty()) { |
| + typedef std::multimap<uint64_t, int>::const_iterator shared_entries_it; |
|
rmcilroy
2014/06/17 13:09:57
Use Type style name for shared_entries_it (e.g., C
Rodolph Perfetta (ARM)
2014/06/18 16:53:22
Done.
|
| + std::pair<shared_entries_it, shared_entries_it> range; |
| + uint64_t data = shared_entries_.begin()->first; |
| + range = shared_entries_.equal_range(data); |
| + shared_entries_it shared_it; |
| + for (shared_it = range.first; shared_it != range.second; shared_it++) { |
| + Instruction* instr = assm_->InstructionAt(shared_it->second); |
| + |
| + // Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0. |
| + ASSERT(instr->IsLdrLiteral() && instr->ImmLLiteral() == 0); |
| + instr->SetImmPCOffsetTarget(assm_->pc()); |
| + } |
| + assm_->dc64(data); |
| + shared_entries_.erase(data); |
|
rmcilroy
2014/06/17 13:09:57
nit - could you leave the entry here and just do s
Rodolph Perfetta (ARM)
2014/06/18 16:53:23
Loop updated to iterate through literals and then
|
| + } |
| + shared_entries_count = 0; |
| + |
| + // Emit unique entries. |
| + std::vector<std::pair<uint64_t, int> >::const_iterator unique_it; |
| + for (unique_it = unique_entries_.begin(); |
| + unique_it != unique_entries_.end(); |
| + unique_it++) { |
| + Instruction* instr = assm_->InstructionAt(unique_it->second); |
| + |
| + // Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0. |
| + ASSERT(instr->IsLdrLiteral() && instr->ImmLLiteral() == 0); |
| + instr->SetImmPCOffsetTarget(assm_->pc()); |
| + assm_->dc64(unique_it->first); |
| + } |
| + unique_entries_.clear(); |
| + first_use_ = -1; |
| +} |
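
The shared-entry loop above can be sketched standalone: equal_range() groups every recorded use of one constant, each use is patched to point at the same slot, and the constant itself is emitted once. The offsets and values below are made up for illustration:

    #include <cstdint>
    #include <iostream>
    #include <map>

    int main() {
      // Models EmitEntries() for shared constants: group uses of one value,
      // "patch" every recorded site to the same slot, emit the value once.
      std::multimap<uint64_t, int> shared_entries = {
          {0x1234, 0}, {0x1234, 8}, {0xabcd, 16}};

      int pool_pc = 1000;  // pretend offset where the next entry lands
      while (!shared_entries.empty()) {
        uint64_t data = shared_entries.begin()->first;
        auto range = shared_entries.equal_range(data);
        for (auto it = range.first; it != range.second; ++it) {
          // The real code patches the ldr at it->second to load from
          // pool_pc; here we just report the retargeting.
          std::cout << "patch ldr at " << it->second
                    << " -> " << pool_pc << "\n";
        }
        std::cout << "emit 0x" << std::hex << data << std::dec
                  << " at " << pool_pc << "\n";
        pool_pc += 8;  // one 64-bit slot per distinct value
        shared_entries.erase(data);
      }
    }
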
| + |
| + |
| +// Assembler |
| Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size) |
| : AssemblerBase(isolate, buffer, buffer_size), |
| + constpool_(this), |
| recorded_ast_id_(TypeFeedbackId::None()), |
| unresolved_branches_(), |
| positions_recorder_(this) { |
| @@ -310,7 +518,7 @@ Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size) |
| Assembler::~Assembler() { |
| - ASSERT(num_pending_reloc_info_ == 0); |
| + ASSERT(constpool_.IsEmpty()); |
| ASSERT(const_pool_blocked_nesting_ == 0); |
| ASSERT(veneer_pool_blocked_nesting_ == 0); |
| } |
| @@ -327,11 +535,10 @@ void Assembler::Reset() { |
| pc_ = buffer_; |
| reloc_info_writer.Reposition(reinterpret_cast<byte*>(buffer_ + buffer_size_), |
| reinterpret_cast<byte*>(pc_)); |
| - num_pending_reloc_info_ = 0; |
| + constpool_.Clear(); |
| next_constant_pool_check_ = 0; |
| next_veneer_pool_check_ = kMaxInt; |
| no_const_pool_before_ = 0; |
| - first_const_pool_use_ = -1; |
| ClearRecordedAstId(); |
| } |
| @@ -339,7 +546,7 @@ void Assembler::Reset() { |
| void Assembler::GetCode(CodeDesc* desc) { |
| // Emit constant pool if necessary. |
| CheckConstPool(true, false); |
| - ASSERT(num_pending_reloc_info_ == 0); |
| + ASSERT(constpool_.IsEmpty()); |
| // Set up code descriptor. |
| if (desc) { |
| @@ -622,8 +829,7 @@ void Assembler::StartBlockConstPool() { |
| void Assembler::EndBlockConstPool() { |
| if (--const_pool_blocked_nesting_ == 0) { |
| // Check the constant pool hasn't been blocked for too long. |
| - ASSERT((num_pending_reloc_info_ == 0) || |
| - (pc_offset() < (first_const_pool_use_ + kMaxDistToConstPool))); |
| + ASSERT(pc_offset() < constpool_.MaxPcOffset()); |
| // Two cases: |
| // * no_const_pool_before_ >= next_constant_pool_check_ and the emission is |
| // still blocked |
| @@ -682,13 +888,6 @@ int Assembler::ConstantPoolSizeAt(Instruction* instr) { |
| } |
| -void Assembler::ConstantPoolMarker(uint32_t size) { |
| - ASSERT(is_const_pool_blocked()); |
| - // + 1 is for the crash guard. |
| - Emit(LDR_x_lit | ImmLLiteral(size + 1) | Rt(xzr)); |
| -} |
| - |
| - |
| void Assembler::EmitPoolGuard() { |
| // We must generate only one instruction as this is used in scopes that |
| // control the size of the code generated. |
| @@ -696,18 +895,6 @@ void Assembler::EmitPoolGuard() { |
| } |
| -void Assembler::ConstantPoolGuard() { |
| -#ifdef DEBUG |
| - // Currently this is only used after a constant pool marker. |
| - ASSERT(is_const_pool_blocked()); |
| - Instruction* instr = reinterpret_cast<Instruction*>(pc_); |
| - ASSERT(instr->preceding()->IsLdrLiteralX() && |
| - instr->preceding()->Rt() == xzr.code()); |
| -#endif |
| - EmitPoolGuard(); |
| -} |
| - |
| - |
| void Assembler::StartBlockVeneerPool() { |
| ++veneer_pool_blocked_nesting_; |
| } |
| @@ -2466,15 +2653,7 @@ void Assembler::GrowBuffer() { |
| // buffer nor pc absolute pointing inside the code buffer, so there is no need |
| // to relocate any emitted relocation entries. |
| - // Relocate pending relocation entries. |
| - for (int i = 0; i < num_pending_reloc_info_; i++) { |
| - RelocInfo& rinfo = pending_reloc_info_[i]; |
| - ASSERT(rinfo.rmode() != RelocInfo::COMMENT && |
| - rinfo.rmode() != RelocInfo::POSITION); |
| - if (rinfo.rmode() != RelocInfo::JS_RETURN) { |
| - rinfo.set_pc(rinfo.pc() + pc_delta); |
| - } |
| - } |
| + // Pending relocation entries are also relative; there is no need to |
| + // relocate them. |
| } |
| @@ -2494,11 +2673,12 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) { |
| || RelocInfo::IsVeneerPool(rmode)); |
| // These modes do not need an entry in the constant pool. |
| } else { |
| - ASSERT(num_pending_reloc_info_ < kMaxNumPendingRelocInfo); |
| - if (num_pending_reloc_info_ == 0) { |
| - first_const_pool_use_ = pc_offset(); |
| + if (constpool_.EntryCount() > kApproximatePoolEntryCount) { |
|
rmcilroy
2014/06/17 13:09:57
Could we just do both this check and the check for
Rodolph Perfetta (ARM)
2014/06/18 16:53:23
moving this code in RecordEntry is a good idea
|
| + // It is time to emit the constant pool after this instruction. |
| + next_constant_pool_check_ = pc_offset() + kInstructionSize; |
| } |
| - pending_reloc_info_[num_pending_reloc_info_++] = rinfo; |
| + |
| + constpool_.RecordEntry(data, rmode); |
| // Make sure the constant pool is not emitted in place of the next |
| // instruction for which we just recorded relocation info. |
| BlockConstPoolFor(1); |
| @@ -2526,11 +2706,9 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) { |
| void Assembler::BlockConstPoolFor(int instructions) { |
| int pc_limit = pc_offset() + instructions * kInstructionSize; |
| if (no_const_pool_before_ < pc_limit) { |
| - // If there are some pending entries, the constant pool cannot be blocked |
| - // further than first_const_pool_use_ + kMaxDistToConstPool |
| - ASSERT((num_pending_reloc_info_ == 0) || |
| - (pc_limit < (first_const_pool_use_ + kMaxDistToConstPool))); |
| no_const_pool_before_ = pc_limit; |
| + // Make sure the pool won't be blocked for too long. |
| + ASSERT(pc_limit < constpool_.MaxPcOffset()); |
| } |
| if (next_constant_pool_check_ < no_const_pool_before_) { |
| @@ -2550,7 +2728,7 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) { |
| } |
| // There is nothing to do if there are no pending constant pool entries. |
| - if (num_pending_reloc_info_ == 0) { |
| + if (constpool_.IsEmpty()) { |
| // Calculate the offset of the next check. |
| next_constant_pool_check_ = pc_offset() + kCheckConstPoolInterval; |
| return; |
| @@ -2559,97 +2737,40 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) { |
| // We emit a constant pool when: |
| // * requested to do so by parameter force_emit (e.g. after each function). |
| // * the distance to the first instruction accessing the constant pool is |
| - // kAvgDistToConstPool or more. |
| - // * no jump is required and the distance to the first instruction accessing |
| - // the constant pool is at least kMaxDistToPConstool / 2. |
| - ASSERT(first_const_pool_use_ >= 0); |
| - int dist = pc_offset() - first_const_pool_use_; |
| - if (!force_emit && dist < kAvgDistToConstPool && |
| - (require_jump || (dist < (kMaxDistToConstPool / 2)))) { |
| + // kApproximateDistToConstPool or more. |
| + // * the number of entries in the pool is kApproximatePoolEntryCount or more. |
| + int dist = constpool_.DistanceToFirstUse(); |
| + int count = constpool_.EntryCount(); |
| + if (!force_emit && |
| + (dist < kApproximateDistToConstPool) && |
| + (count < kApproximatePoolEntryCount)) { |
| return; |
| } |
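
Distilled, the emission decision is a simple predicate. The sketch below is hedged: ShouldEmitConstPool is a hypothetical helper and the threshold values are placeholders (the real constants are defined in the assembler headers, not in this hunk); MaxPcOffset() remains the hard limit enforced by the asserts.

    #include <iostream>

    // Emit if forced, or once either approximate threshold is crossed.
    bool ShouldEmitConstPool(bool force_emit, int dist_to_first_use,
                             int entry_count, int approx_dist,
                             int approx_count) {
      if (force_emit) return true;
      return dist_to_first_use >= approx_dist || entry_count >= approx_count;
    }

    int main() {
      // Illustrative thresholds only.
      const int kApproximateDistToConstPool = 64 * 1024;
      const int kApproximatePoolEntryCount = 512;
      std::cout << ShouldEmitConstPool(false, 70000, 3,
                                       kApproximateDistToConstPool,
                                       kApproximatePoolEntryCount)  // 1: far
                << ShouldEmitConstPool(false, 100, 600,
                                       kApproximateDistToConstPool,
                                       kApproximatePoolEntryCount)  // 1: full
                << ShouldEmitConstPool(false, 100, 3,
                                       kApproximateDistToConstPool,
                                       kApproximatePoolEntryCount)  // 0: wait
                << "\n";
    }
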
| - int jump_instr = require_jump ? kInstructionSize : 0; |
| - int size_pool_marker = kInstructionSize; |
| - int size_pool_guard = kInstructionSize; |
| - int pool_size = jump_instr + size_pool_marker + size_pool_guard + |
| - num_pending_reloc_info_ * kPointerSize; |
| - int needed_space = pool_size + kGap; |
| // Emit veneers for branches that would go out of range during emission of the |
| // constant pool. |
| - CheckVeneerPool(false, require_jump, kVeneerDistanceMargin + pool_size); |
| - |
| - Label size_check; |
| - bind(&size_check); |
| + int size = constpool_.WorstCaseSize(); |
|
rmcilroy
2014/06/17 13:09:57
s/size/worst_case_size
Rodolph Perfetta (ARM)
2014/06/18 16:53:22
Done.
|
| + CheckVeneerPool(false, require_jump, kVeneerDistanceMargin + size); |
| + // Buffer checks happen after an emit, hence the 2 * kInstructionSize. |
|
rmcilroy
2014/06/17 13:09:57
I'm not sure what the buffer checks you mention he
Rodolph Perfetta (ARM)
2014/06/18 16:53:23
My comment is clumsy and inaccurate. What I meant
|
| + int needed_space = size + kGap + 2 * kInstructionSize; |
| // Check that the code buffer is large enough before emitting the constant |
| // pool (include the jump over the pool, the constant pool marker, the |
| // constant pool guard, and the gap to the relocation information). |
| - while (buffer_space() <= needed_space) { |
| + while (buffer_space() < needed_space) { |
| GrowBuffer(); |
| } |
| { |
| // Block recursive calls to CheckConstPool and protect from veneer pools. |
| BlockPoolsScope block_pools(this); |
| - RecordConstPool(pool_size); |
| - |
| - // Emit jump over constant pool if necessary. |
| - Label after_pool; |
| - if (require_jump) { |
| - b(&after_pool); |
| - } |
| - |
| - // Emit a constant pool header. The header has two goals: |
| - // 1) Encode the size of the constant pool, for use by the disassembler. |
| - // 2) Terminate the program, to try to prevent execution from accidentally |
| - // flowing into the constant pool. |
| - // The header is therefore made of two arm64 instructions: |
| - // ldr xzr, #<size of the constant pool in 32-bit words> |
| - // blr xzr |
| - // If executed the code will likely segfault and lr will point to the |
| - // beginning of the constant pool. |
| - // TODO(all): currently each relocated constant is 64 bits, consider adding |
| - // support for 32-bit entries. |
| - RecordComment("[ Constant Pool"); |
| - ConstantPoolMarker(2 * num_pending_reloc_info_); |
| - ConstantPoolGuard(); |
| - |
| - // Emit constant pool entries. |
| - for (int i = 0; i < num_pending_reloc_info_; i++) { |
| - RelocInfo& rinfo = pending_reloc_info_[i]; |
| - ASSERT(rinfo.rmode() != RelocInfo::COMMENT && |
| - rinfo.rmode() != RelocInfo::POSITION && |
| - rinfo.rmode() != RelocInfo::STATEMENT_POSITION && |
| - rinfo.rmode() != RelocInfo::CONST_POOL && |
| - rinfo.rmode() != RelocInfo::VENEER_POOL); |
| - |
| - Instruction* instr = reinterpret_cast<Instruction*>(rinfo.pc()); |
| - // Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0. |
| - ASSERT(instr->IsLdrLiteral() && |
| - instr->ImmLLiteral() == 0); |
| - |
| - instr->SetImmPCOffsetTarget(reinterpret_cast<Instruction*>(pc_)); |
| - dc64(rinfo.data()); |
| - } |
| - |
| - num_pending_reloc_info_ = 0; |
| - first_const_pool_use_ = -1; |
| - |
| - RecordComment("]"); |
| - |
| - if (after_pool.is_linked()) { |
| - bind(&after_pool); |
| - } |
| + constpool_.Emit(require_jump); |
| } |
| // Since a constant pool was just emitted, move the check offset forward by |
| // the standard interval. |
| next_constant_pool_check_ = pc_offset() + kCheckConstPoolInterval; |
| - |
| - ASSERT(SizeOfCodeGeneratedSince(&size_check) == |
| - static_cast<unsigned>(pool_size)); |
| } |
|
rmcilroy
2014/06/17 13:09:57
Add an assert that the size of code generated was
Rodolph Perfetta (ARM)
2014/06/18 16:53:22
Done.
|