Index: src/a64/assembler-a64.cc
diff --git a/src/a64/assembler-a64.cc b/src/a64/assembler-a64.cc
index 43b1391605f7392ffdd19266061dba9369fd8f66..4bc0788e56c8353a40b8d83151a356bbd45b3ffa 100644
--- a/src/a64/assembler-a64.cc
+++ b/src/a64/assembler-a64.cc
@@ -286,6 +286,7 @@ Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
       unresolved_branches_(),
       positions_recorder_(this) {
   const_pool_blocked_nesting_ = 0;
+  veneer_pool_blocked_nesting_ = 0;
   Reset();
 }
@@ -293,6 +294,7 @@ Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
 Assembler::~Assembler() {
   ASSERT(num_pending_reloc_info_ == 0);
   ASSERT(const_pool_blocked_nesting_ == 0);
+  ASSERT(veneer_pool_blocked_nesting_ == 0);
 }
@@ -300,13 +302,16 @@ void Assembler::Reset() {
 #ifdef DEBUG
   ASSERT((pc_ >= buffer_) && (pc_ < buffer_ + buffer_size_));
   ASSERT(const_pool_blocked_nesting_ == 0);
+  ASSERT(veneer_pool_blocked_nesting_ == 0);
+  ASSERT(unresolved_branches_.empty());
   memset(buffer_, 0, pc_ - buffer_);
 #endif
   pc_ = buffer_;
   reloc_info_writer.Reposition(reinterpret_cast<byte*>(buffer_ + buffer_size_),
                                reinterpret_cast<byte*>(pc_));
   num_pending_reloc_info_ = 0;
-  next_buffer_check_ = 0;
+  next_constant_pool_check_ = 0;
+  next_veneer_pool_check_ = kMaxInt;
   no_const_pool_before_ = 0;
   first_const_pool_use_ = -1;
   ClearRecordedAstId();
@@ -534,6 +539,11 @@ int Assembler::LinkAndGetByteOffsetTo(Label* label) {
 void Assembler::DeleteUnresolvedBranchInfoForLabel(Label* label) {
+  if (unresolved_branches_.empty()) {
+    ASSERT(next_veneer_pool_check_ == kMaxInt);
+    return;
+  }
+
   // Branches to this label will be resolved when the label is bound below.
   std::multimap<int, FarBranchInfo>::iterator it_tmp, it;
   it = unresolved_branches_.begin();
@@ -544,6 +554,12 @@ void Assembler::DeleteUnresolvedBranchInfoForLabel(Label* label) {
       unresolved_branches_.erase(it_tmp);
     }
   }
+  if (unresolved_branches_.empty()) {
+    next_veneer_pool_check_ = kMaxInt;
+  } else {
+    next_veneer_pool_check_ =
+        unresolved_branches_first_limit() - kVeneerDistanceCheckMargin;
+  }
 }
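
The update above relies on unresolved_branches_first_limit(), which this diff
does not show. Since unresolved_branches_ is a std::multimap keyed by the last
pc each branch can reach, the accessor presumably just returns the first
(smallest, most urgent) key; a minimal sketch under that assumption:

    // Sketch only: the real accessor is declared in assembler-a64.h.
    int Assembler::unresolved_branches_first_limit() const {
      ASSERT(!unresolved_branches_.empty());
      return unresolved_branches_.begin()->first;
    }
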
@@ -551,7 +567,7 @@ void Assembler::StartBlockConstPool() {
   if (const_pool_blocked_nesting_++ == 0) {
     // Prevent constant pool checks happening by setting the next check to
     // the biggest possible offset.
-    next_buffer_check_ = kMaxInt;
+    next_constant_pool_check_ = kMaxInt;
   }
 }
@@ -560,13 +576,13 @@ void Assembler::EndBlockConstPool() {
   if (--const_pool_blocked_nesting_ == 0) {
     // Check the constant pool hasn't been blocked for too long.
     ASSERT((num_pending_reloc_info_ == 0) ||
-           (pc_offset() < (first_const_pool_use_ + kMaxDistToPool)));
+           (pc_offset() < (first_const_pool_use_ + kMaxDistToConstPool)));
     // Two cases:
-    //  * no_const_pool_before_ >= next_buffer_check_ and the emission is
+    //  * no_const_pool_before_ >= next_constant_pool_check_ and the emission is
     //    still blocked
-    //  * no_const_pool_before_ < next_buffer_check_ and the next emit will
-    //    trigger a check.
-    next_buffer_check_ = no_const_pool_before_;
+    //  * no_const_pool_before_ < next_constant_pool_check_ and the next emit
+    //    will trigger a check.
+    next_constant_pool_check_ = no_const_pool_before_;
   }
 }
@@ -622,6 +638,20 @@ void Assembler::ConstantPoolGuard() {
 }
+void Assembler::StartBlockVeneerPool() {
+  ++veneer_pool_blocked_nesting_;
+}
+
+
+void Assembler::EndBlockVeneerPool() {
+  if (--veneer_pool_blocked_nesting_ == 0) {
+    // Check the veneer pool hasn't been blocked for too long.
+    ASSERT(unresolved_branches_.empty() ||
+           (pc_offset() < unresolved_branches_first_limit()));
+  }
+}
+
+
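
Start/EndBlockVeneerPool are meant to be driven through RAII scopes, which live
in the header and are not part of this hunk. A minimal sketch of what
BlockPoolsScope presumably looks like, assuming StartBlockPools() and
EndBlockPools() simply fan out to the constant pool and veneer pool variants:

    // Sketch only: the real scope class is declared in assembler-a64.h.
    class BlockPoolsScope {
     public:
      explicit BlockPoolsScope(Assembler* assem) : assem_(assem) {
        // Bumps both const_pool_blocked_nesting_ and
        // veneer_pool_blocked_nesting_, so neither pool can be emitted while
        // the scope is alive.
        assem_->StartBlockPools();
      }
      ~BlockPoolsScope() { assem_->EndBlockPools(); }

     private:
      Assembler* assem_;
    };
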
 void Assembler::br(const Register& xn) {
   positions_recorder()->WriteRecordedPositions();
   ASSERT(xn.Is64Bits());
@@ -1870,8 +1900,8 @@ void Assembler::debug(const char* message, uint32_t code, Instr params) {
   Serializer::TooLateToEnableNow();
 #endif
   // The arguments to the debug marker need to be contiguous in memory, so
-  // make sure we don't try to emit a literal pool.
-  BlockConstPoolScope scope(this);
+  // make sure we don't try to emit pools.
+  BlockPoolsScope scope(this);
   Label start;
   bind(&start);
@@ -2445,14 +2475,14 @@ void Assembler::BlockConstPoolFor(int instructions) {
   int pc_limit = pc_offset() + instructions * kInstructionSize;
   if (no_const_pool_before_ < pc_limit) {
     // If there are some pending entries, the constant pool cannot be blocked
-    // further than first_const_pool_use_ + kMaxDistToPool
+    // further than first_const_pool_use_ + kMaxDistToConstPool
     ASSERT((num_pending_reloc_info_ == 0) ||
-           (pc_limit < (first_const_pool_use_ + kMaxDistToPool)));
+           (pc_limit < (first_const_pool_use_ + kMaxDistToConstPool)));
     no_const_pool_before_ = pc_limit;
   }
-  if (next_buffer_check_ < no_const_pool_before_) {
-    next_buffer_check_ = no_const_pool_before_;
+  if (next_constant_pool_check_ < no_const_pool_before_) {
+    next_constant_pool_check_ = no_const_pool_before_;
   }
 }
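
For callers, the difference between the blocking styles matters:
BlockConstPoolFor reserves a fixed instruction window, while the scope classes
block until destruction. A hypothetical call site (the macro-assembler context,
registers, and the data label are invented for illustration):

    // Keep a three-instruction sequence contiguous so no constant pool can be
    // emitted between the adr and the dependent loads.
    __ BlockConstPoolFor(3);
    __ adr(x10, &data);
    __ ldr(x11, MemOperand(x10));
    __ ldr(x12, MemOperand(x10, kPointerSize));
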
@@ -2470,42 +2500,47 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
   // There is nothing to do if there are no pending constant pool entries.
   if (num_pending_reloc_info_ == 0) {
     // Calculate the offset of the next check.
-    next_buffer_check_ = pc_offset() + kCheckPoolInterval;
+    next_constant_pool_check_ = pc_offset() + kCheckConstPoolInterval;
     return;
   }
   // We emit a constant pool when:
   //  * requested to do so by parameter force_emit (e.g. after each function).
   //  * the distance to the first instruction accessing the constant pool is
-  //    kAvgDistToPool or more.
+  //    kAvgDistToConstPool or more.
   //  * no jump is required and the distance to the first instruction accessing
-  //    the constant pool is at least kMaxDistToPool / 2.
+  //    the constant pool is at least kMaxDistToConstPool / 2.
   ASSERT(first_const_pool_use_ >= 0);
   int dist = pc_offset() - first_const_pool_use_;
-  if (!force_emit && dist < kAvgDistToPool &&
-      (require_jump || (dist < (kMaxDistToPool / 2)))) {
+  if (!force_emit && dist < kAvgDistToConstPool &&
+      (require_jump || (dist < (kMaxDistToConstPool / 2)))) {
     return;
   }
-  Label size_check;
-  bind(&size_check);
-
-  // Check that the code buffer is large enough before emitting the constant
-  // pool (include the jump over the pool, the constant pool marker, the
-  // constant pool guard, and the gap to the relocation information).
   int jump_instr = require_jump ? kInstructionSize : 0;
   int size_pool_marker = kInstructionSize;
   int size_pool_guard = kInstructionSize;
   int pool_size = jump_instr + size_pool_marker + size_pool_guard +
                   num_pending_reloc_info_ * kPointerSize;
   int needed_space = pool_size + kGap;
+
+  // Emit veneers for branches that would go out of range during emission of
+  // the constant pool.
+  CheckVeneerPool(require_jump, kVeneerDistanceMargin - pool_size);
+
+  Label size_check;
+  bind(&size_check);
+
+  // Check that the code buffer is large enough before emitting the constant
+  // pool (include the jump over the pool, the constant pool marker, the
+  // constant pool guard, and the gap to the relocation information).
   while (buffer_space() <= needed_space) {
     GrowBuffer();
   }
   {
-    // Block recursive calls to CheckConstPool.
-    BlockConstPoolScope block_const_pool(this);
+    // Block recursive calls to CheckConstPool and protect from veneer pools.
+    BlockPoolsScope block_pools(this);
     RecordComment("[ Constant Pool");
     RecordConstPool(pool_size);
@@ -2558,13 +2593,114 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
   // Since a constant pool was just emitted, move the check offset forward by
   // the standard interval.
-  next_buffer_check_ = pc_offset() + kCheckPoolInterval;
+  next_constant_pool_check_ = pc_offset() + kCheckConstPoolInterval;
   ASSERT(SizeOfCodeGeneratedSince(&size_check) ==
          static_cast<unsigned>(pool_size));
 }
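
The space computation above is easy to sanity-check by hand: on A64,
kInstructionSize is 4 bytes and kPointerSize is 8. For example, with ten
pending entries and a jump required:

    // Worked example (illustrative entry count):
    //   jump_instr       = 4                 (one B over the pool)
    //   size_pool_marker = 4
    //   size_pool_guard  = 4
    //   entries          = 10 * 8 = 80       (num_pending_reloc_info_ * kPointerSize)
    //   pool_size        = 4 + 4 + 4 + 80 = 92 bytes
    //   needed_space     = 92 + kGap
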
+bool Assembler::ShouldEmitVeneer(int max_reachable_pc, int margin) {
+  // Account for the branch around the veneers and the guard.
+  int protection_offset = 2 * kInstructionSize;
+  return pc_offset() > max_reachable_pc - margin - protection_offset -
+         static_cast<int>(unresolved_branches_.size() * kMaxVeneerCodeSize);
+}
+
+
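
Reading ShouldEmitVeneer right to left: from the last pc the branch can reach,
it reserves the caller-requested margin, two instructions of protection (the
branch over the pool and the guard), and worst-case veneer space for every
unresolved branch, since each emitted veneer pushes later code further away.
With invented numbers:

    // Hypothetical figures, for illustration only:
    //   max_reachable_pc = 1,000,000
    //   margin           = 2,048
    //   protection       = 2 * 4 = 8 bytes
    //   10 unresolved branches, kMaxVeneerCodeSize bytes reserved for each
    // A veneer is emitted once pc_offset() passes
    //   1,000,000 - 2,048 - 8 - 10 * kMaxVeneerCodeSize.
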
+void Assembler::EmitVeneers(bool need_protection, int margin) {
+  BlockPoolsScope scope(this);
+  RecordComment("[ Veneers");
+
+  Label end;
+  if (need_protection) {
+    b(&end);
+  }
+
+  EmitVeneersGuard();
+
+  Label size_check;
+
+  std::multimap<int, FarBranchInfo>::iterator it, it_to_delete;
+
+  it = unresolved_branches_.begin();
+  while (it != unresolved_branches_.end()) {
+    if (ShouldEmitVeneer(it->first, margin)) {
+      Instruction* branch = InstructionAt(it->second.pc_offset_);
+      Label* label = it->second.label_;
+
+#ifdef DEBUG
+      bind(&size_check);
+#endif
+      // Patch the branch to point to the current position, and emit a branch
+      // to the label.
+      Instruction* veneer = reinterpret_cast<Instruction*>(pc_);
+      RemoveBranchFromLabelLinkChain(branch, label, veneer);
+      branch->SetImmPCOffsetTarget(veneer);
+      b(label);
+#ifdef DEBUG
+      ASSERT(SizeOfCodeGeneratedSince(&size_check) <=
+             static_cast<uint64_t>(kMaxVeneerCodeSize));
+      size_check.Unuse();
+#endif
+
+      it_to_delete = it++;
+      unresolved_branches_.erase(it_to_delete);
+    } else {
+      ++it;
+    }
+  }
+
+  bind(&end);
+
+  RecordComment("]");
+
+  if (unresolved_branches_.empty()) {
+    next_veneer_pool_check_ = kMaxInt;
+  } else {
+    next_veneer_pool_check_ =
+        unresolved_branches_first_limit() - kVeneerDistanceCheckMargin;
+  }
+}
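
The it_to_delete = it++ dance is the classic pre-C++11 idiom for erasing from
an associative container while iterating: the iterator is advanced before
erase() invalidates it. Under C++11, where erase() returns the next iterator,
the same pattern reduces to the following (self-contained sketch with invented
names, not part of the patch):

    #include <map>

    // Erase every entry whose limit is below `pc` while walking the map.
    void PruneBranches(std::multimap<int, int>* branches, int pc) {
      std::multimap<int, int>::iterator it = branches->begin();
      while (it != branches->end()) {
        if (it->first < pc) {
          it = branches->erase(it);  // erase() hands back the next iterator
        } else {
          ++it;
        }
      }
    }
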
+
+
+void Assembler::EmitVeneersGuard() {
+  // In debug code, emit a trap so that execution falling through into the
+  // veneer pool fails fast; when EmitVeneers emits a protecting branch, this
+  // guard is jumped over along with the veneers.
+  if (emit_debug_code()) {
+    Unreachable();
+  }
+}
+
+
+void Assembler::CheckVeneerPool(bool require_jump, int margin) {
+  // There is nothing to do if there are no pending veneer pool entries.
+  if (unresolved_branches_.empty()) {
+    ASSERT(next_veneer_pool_check_ == kMaxInt);
+    return;
+  }
+
+  ASSERT(pc_offset() < unresolved_branches_first_limit());
+
+  // Some short sequences of instructions must not be broken up by veneer
+  // pool emission; such sequences are protected by calls to
+  // BlockVeneerPoolFor and BlockVeneerPoolScope.
+  if (is_veneer_pool_blocked()) {
+    return;
+  }
+
+  if (!require_jump) {
+    // Prefer emitting veneers protected by an existing instruction.
+    margin *= kVeneerNoProtectionFactor;
+  }
+  if (ShouldEmitVeneers(margin)) {
+    EmitVeneers(require_jump, margin);
+  } else {
+    next_veneer_pool_check_ =
+        unresolved_branches_first_limit() - kVeneerDistanceCheckMargin;
+  }
+}
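
Neither ShouldEmitVeneers() nor the hook that runs CheckVeneerPool on every
emitted instruction appears in this hunk. Presumably the per-instruction check
just compares pc_offset() against the two cached thresholds maintained by
Reset(), EmitVeneers(), and CheckConstPool(); a minimal sketch under that
assumption:

    // Sketch only: plausible shape of the per-instruction check; the real
    // code lives in the assembler header.
    void Assembler::CheckBuffer() {
      ASSERT(pc_ < (buffer_ + buffer_size_));
      if (buffer_space() < kGap) {
        GrowBuffer();
      }
      if (pc_offset() >= next_veneer_pool_check_) {
        CheckVeneerPool(true, kVeneerDistanceMargin);
      }
      if (pc_offset() >= next_constant_pool_check_) {
        CheckConstPool(false, true);
      }
    }
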
+
+
 void Assembler::RecordComment(const char* msg) {
   if (FLAG_code_comments) {
     CheckBuffer();