Index: src/compiler/bytecode-basic-block-analysis.cc |
diff --git a/src/compiler/bytecode-basic-block-analysis.cc b/src/compiler/bytecode-basic-block-analysis.cc |
new file mode 100644 |
index 0000000000000000000000000000000000000000..4db5d82903f6e54f6b5e3902c5ea04fe8c5f2061 |
--- /dev/null |
+++ b/src/compiler/bytecode-basic-block-analysis.cc |
@@ -0,0 +1,325 @@ |
+// Copyright 2015 the V8 project authors. All rights reserved. |
+// Use of this source code is governed by a BSD-style license that can be |
+// found in the LICENSE file. |
+ |
+#include "src/compiler/bytecode-basic-block-analysis.h" |
+ |
+#include "src/interpreter/bytecode-array-iterator.h" |
+ |
+namespace v8 { |
+namespace internal { |
+namespace compiler { |
+ |
+namespace { |
+ |
+int GetJumpAbsoluteOffset(const interpreter::BytecodeArrayIterator& iterator) { |
+ interpreter::Bytecode bytecode = iterator.current_bytecode(); |
+ if (interpreter::Bytecodes::IsJumpImmediate(bytecode)) { |
+ int relative_offset = iterator.GetImmediateOperand(0); |
+ return iterator.current_offset() + relative_offset; |
+ } else if (interpreter::Bytecodes::IsJumpConstant(bytecode)) { |
+ Smi* smi = Smi::cast(*iterator.GetConstantForIndexOperand(0)); |
+ return iterator.current_offset() + smi->value(); |
+ } else { |
+ UNREACHABLE(); |
+ return -1; |
+ } |
+} |
+ |
+} // namespace |
+ |
+ |
// Constructs the analysis over |bytecode_array|. All containers (and later
// the basic blocks themselves) are allocated in |zone|; no work is done
// until Analyze() is called.
BytecodeBasicBlockAnalysis::BytecodeBasicBlockAnalysis(
    Zone* zone, Handle<BytecodeArray> bytecode_array)
    : zone_(zone),
      block_map_(zone),
      rpo_order_(zone),
      dependency_order_(zone),
      bytecode_array_(bytecode_array) {}
+ |
+ |
// Runs the full analysis pipeline and returns the blocks in dependency
// order. The phases are order-dependent: blocks must exist before they can
// be linked, links are required for the RPO numbering, RPO ids are required
// to find dominators, and dominator information is consulted when computing
// the dependency order.
const ZoneVector<BytecodeBasicBlock*>* BytecodeBasicBlockAnalysis::Analyze() {
  CreateBasicBlocks();
  LinkBasicBlocks();
  AssignRpoOrder();
  FindDominators();
  FindDependencyOrder();
  return &dependency_order_;
}
+ |
+ |
+void BytecodeBasicBlockAnalysis::CreateBasicBlock(int start, int end) { |
+ DCHECK(block_map_.find(start) == block_map_.end()); |
+ BytecodeBasicBlock* new_block = |
+ new (zone()) BytecodeBasicBlock(zone(), start, end); |
+ block_map_.insert(std::make_pair(start, new_block)); |
+} |
+ |
+ |
+void BytecodeBasicBlockAnalysis::SplitBasicBlock(int offset) { |
+ // Find the basic blocking containing offset. |
+ auto offset_block_pair = block_map_.lower_bound(offset); |
+ if (offset_block_pair->first > offset) { |
rmcilroy
2015/12/08 13:25:07
nit - comment would be good here
oth
2015/12/09 11:26:45
Done.
|
+ offset_block_pair--; |
+ } |
+ DCHECK(offset_block_pair->first <= offset); |
+ |
+ if (offset_block_pair->first < offset) { |
rmcilroy
2015/12/08 13:25:07
nit - offset_block_pair->first != offset (for clar
oth
2015/12/09 11:26:44
Done.
|
+ // Only split if offset does not point to an existing block start. |
+ int split_end = offset_block_pair->second->end(); |
+ offset_block_pair->second->set_end(offset); |
+ CreateBasicBlock(offset, split_end); |
+ } |
+} |
+ |
+ |
+void BytecodeBasicBlockAnalysis::CreateBasicBlocks() { |
+ interpreter::BytecodeArrayIterator iterator(bytecode_array_); |
+ ZoneVector<int> split_offsets(zone()); |
rmcilroy
2015/12/08 13:25:07
nit - jump_targets would be clearer here IMO
oth
2015/12/09 11:26:45
Done.
|
+ |
+ // Create the exit block. |
+ CreateBasicBlock(bytecode_array_->length(), kMaxInt); |
+ |
+ // Find and allocate basic blocks in the bytecode. |
+ while (!iterator.done()) { |
+ int block_start = iterator.current_offset(); |
+ interpreter::Bytecode bytecode = iterator.current_bytecode(); |
+ while (!interpreter::Bytecodes::IsLocalControlFlow(bytecode) && |
+ iterator.current_offset() + iterator.current_size() < |
+ bytecode_array_->length()) { |
rmcilroy
2015/12/08 13:25:07
Can you not call !iterator.done() instead of "iter
oth
2015/12/09 11:26:45
Yes, flattened into a single loop.
|
+ iterator.Advance(); |
+ bytecode = iterator.current_bytecode(); |
+ } |
+ CreateBasicBlock(block_start, |
+ iterator.current_offset() + iterator.current_size()); |
+ if (interpreter::Bytecodes::IsJump(bytecode)) { |
+ split_offsets.push_back(GetJumpAbsoluteOffset(iterator)); |
+ } |
+ iterator.Advance(); |
+ } |
+ |
+ for (auto split_offset = split_offsets.begin(); |
rmcilroy
2015/12/08 13:25:07
Comment that you are splitting basic blocks based
oth
2015/12/09 11:26:45
Done.
|
+ split_offset != split_offsets.end(); split_offset++) { |
+ SplitBasicBlock(*split_offset); |
+ } |
+} |
+ |
+ |
+void BytecodeBasicBlockAnalysis::LinkBasicBlocks() { |
+ interpreter::BytecodeArrayIterator iterator(bytecode_array_); |
+ while (!iterator.done()) { |
+ // Find the last last bytecode in each block. |
+ auto offset_block_pair = block_map_.find(iterator.current_offset()); |
+ |
+ DCHECK(offset_block_pair != block_map_.end()); |
+ BytecodeBasicBlock* current_block = offset_block_pair->second; |
+ DCHECK_NE(current_block, exit()); |
+ while (iterator.current_offset() + |
+ interpreter::Bytecodes::Size(iterator.current_bytecode()) != |
rmcilroy
2015/12/08 13:25:07
iterator.current_bytecode_size() ?
oth
2015/12/09 11:26:44
Removed current_bytecode from BytecodeArrayIterato
|
+ current_block->end()) { |
+ DCHECK_LT(iterator.current_offset(), current_block->end()); |
+ iterator.Advance(); |
+ } |
+ current_block->set_last_bytecode_offset(iterator.current_offset()); |
+ |
+ // Connect up links with other blocks. |
+ interpreter::Bytecode bytecode = iterator.current_bytecode(); |
+ if (interpreter::Bytecodes::IsConditionalJump(bytecode)) { |
+ int target_offset = GetJumpAbsoluteOffset(iterator); |
+ current_block->end_with_conditional_jump( |
+ block_map_[target_offset], block_map_[current_block->end()]); |
+ } else if (interpreter::Bytecodes::IsJump(bytecode)) { |
+ int target_offset = GetJumpAbsoluteOffset(iterator); |
+ current_block->end_with_block(block_map_[target_offset]); |
+ } else if (!interpreter::Bytecodes::IsLocalControlFlow(bytecode)) { |
+ current_block->end_with_block(block_map_[current_block->end()]); |
+ } else { |
+ DCHECK_EQ(bytecode, interpreter::Bytecode::kReturn); |
+ current_block->end_with_block(exit()); |
+ } |
+ iterator.Advance(); |
+ } |
+} |
+ |
+ |
// Numbers the reachable blocks with ids derived from a depth-first
// traversal starting at the entry block, fills |rpo_order_| with the blocks
// in id order, and detaches blocks that are unreachable from the entry.
void BytecodeBasicBlockAnalysis::AssignRpoOrder() {
  // Assign reverse post-order id to aid with finding dominators.
  ZoneStack<BytecodeBasicBlock*> pending(zone());
  ZoneStack<BytecodeBasicBlock*> ordered(zone());

  // Iterative DFS from the entry block (bytecode offset 0). A block's
  // rpo_id is temporarily set to 0 as a visited marker; the final ids are
  // assigned when |ordered| is unwound below.
  pending.push(block_map_[0]);
  while (!pending.empty()) {
    BytecodeBasicBlock* current = pending.top();
    pending.pop();
    if (current->rpo_id() != BytecodeBasicBlock::kUnassignedId) {
      continue;
    }

    ordered.push(current);
    current->set_rpo_id(0);
    // Every block has at least one successor, except the exit block, which
    // starts at bytecode_array_->length().
    DCHECK((current->if_true() != nullptr || current->if_false() != nullptr) ||
           current->start() == bytecode_array_->length());

    if (current->if_true() != nullptr &&
        current->if_true()->rpo_id() == BytecodeBasicBlock::kUnassignedId) {
      pending.push(current->if_true());
    }
    if (current->if_false() != nullptr &&
        current->if_false()->rpo_id() == BytecodeBasicBlock::kUnassignedId) {
      pending.push(current->if_false());
    }
  }

  // Non-reachable blocks are not assigned an RPO id and need to be pruned.
  // This happens quite rarely, e.g. all paths in a switch return from function:
  //   function f(x) {
  //     switch (x) {
  //       case 'a': return 1;
  //       default: return 0;
  //     }
  //     return undefined;  // Potentially inserted by bytecode generator.
  //   }
  if (ordered.size() != block_map_.size()) {
    int unreachable = 0;
    for (auto iter = block_map_.begin(); iter != block_map_.end(); iter++) {
      BytecodeBasicBlock* block = iter->second;
      if (block->incoming_count() == 0 && block->start() > 0) {
        // Remove the unreachable block from its successors' incoming edge
        // lists so the later phases never observe it.
        if (block->if_true() != nullptr) {
          auto it = std::find(block->if_true()->incoming_.begin(),
                              block->if_true()->incoming_.end(), block);
          block->if_true()->incoming_.erase(it);
        }
        if (block->if_false() != nullptr) {
          auto it = std::find(block->if_false()->incoming_.begin(),
                              block->if_false()->incoming_.end(), block);
          block->if_false()->incoming_.erase(it);
        }
        unreachable++;
      }
    }
    // Start has no incoming, but this is expected.
    DCHECK_EQ(unreachable, block_map_.size() - ordered.size());
  }

  // Pop post-order entries and back insert into rpo_order_ vector.
  int id = static_cast<int>(ordered.size());
  rpo_order_.resize(ordered.size());
  while (!ordered.empty()) {
    BytecodeBasicBlock* current = ordered.top();
    current->set_rpo_id(--id);
    rpo_order_[id] = current;
    ordered.pop();
  }
}
+ |
+ |
// Computes, for every block in |rpo_order_|, the set of blocks dominating
// it (stored as a BitVector of rpo ids) using the classic iterative
// dataflow formulation: dom(b) = {b} union intersection of dom(p) over all
// predecessors p of b.
void BytecodeBasicBlockAnalysis::FindDominators() {
  // Naive implementation of finding dominators O(N^2). Lower cost
  // algorithms exist, but have additional implementation complexity
  // and higher fixed cost. This typically takes a maximum of 3
  // iterations to converge and in practice N is small.
  DCHECK(!rpo_order_.empty());
  DCHECK(rpo_order_[0]->dominator_rpo_ids() == nullptr);
  for (size_t i = 0; i < rpo_order_.size(); i++) {
    rpo_order_[i]->set_dominator_rpo_ids(
        new (zone()) BitVector(static_cast<int>(rpo_order_.size()), zone()));
    // Initial solution: the entry block (rpo id 0) is dominated only by
    // itself; every other block starts pessimistically with all ids except
    // the entry's. The fixpoint loop below refines these sets.
    if (i == 0) {
      rpo_order_[i]->dominator_rpo_ids()->Add(0);
    } else {
      rpo_order_[i]->dominator_rpo_ids()->AddAll();
      rpo_order_[i]->dominator_rpo_ids()->Remove(0);
    }
  }

  // Iterate to a fixpoint: recompute each non-entry block's dominator set
  // as the intersection of its predecessors' sets plus the block itself,
  // repeating while any set changes.
  BitVector accumulator(static_cast<int>(rpo_order_.size()), zone());
  bool changed = true;
  while (changed) {
    changed = false;
    for (size_t i = 1; i < rpo_order_.size(); i++) {
      // Fold the predecessors' dominator sets into |accumulator|.
      for (size_t j = 0; j < rpo_order_[i]->incoming()->size(); j++) {
        auto incoming = rpo_order_[i]->incoming()->at(j);
        if (j == 0) {
          accumulator.CopyFrom(*incoming->dominator_rpo_ids());
        } else {
          accumulator.Intersect(*incoming->dominator_rpo_ids());
        }
      }
      // A block always dominates itself.
      accumulator.Add(static_cast<int>(i));
      if (!rpo_order_[i]->dominator_rpo_ids()->Equals(accumulator)) {
        rpo_order_[i]->dominator_rpo_ids()->CopyFrom(accumulator);
        changed = true;
      }
    }
  }
}
+ |
+ |
// Fills |dependency_order_| with all reachable blocks, ordered so that
// every forward-edge predecessor of a block appears before the block
// itself. Back edges (edges into a dominating block, i.e. loop headers)
// are not treated as dependencies, which keeps the worklist from cycling.
void BytecodeBasicBlockAnalysis::FindDependencyOrder() {
  // Compute the dependency order of basic blocks based on forward
  // links. In dependency order, all forward linking blocks into a
  // block are satisfied before the block. This simplifies the
  // handling merges.
  // |queued| tracks rpo ids currently sitting in |to_visit|; |satisfied|
  // tracks blocks already emitted into the dependency order.
  BitVector queued(static_cast<int>(block_map_.size()), zone());
  BitVector satisfied(static_cast<int>(block_map_.size()), zone());
  ZoneQueue<int> to_visit(zone());

  dependency_order_.resize(rpo_order_.size());
  auto inserter = dependency_order_.begin();
  to_visit.push(start()->rpo_id());
  queued.Add(start()->rpo_id());
  while (!to_visit.empty()) {
    int rpo_id = to_visit.front();
    queued.Remove(rpo_id);
    to_visit.pop();
    if (satisfied.Contains(rpo_id)) {
      continue;
    }

    // Check whether incoming edges have satisfied dependencies.
    BytecodeBasicBlock* current = rpo_order_[rpo_id];
    bool unsatisfied = false;
    for (size_t i = 0; i < current->incoming_count(); i++) {
      const BytecodeBasicBlock* incoming = current->incoming(i);
      // Enqueue unsatisfied dependencies from incoming edges provided
      // they are not in the queue already and are not loop
      // headers. The latter are not considered dependencies.
      int incoming_rpo_id = incoming->rpo_id();
      if (!satisfied.Contains(incoming_rpo_id) &&
          !incoming->is_dominated_by(current)) {
        if (!queued.Contains(incoming_rpo_id)) {
          to_visit.push(incoming_rpo_id);
          queued.Add(incoming_rpo_id);
        }
        unsatisfied = true;
      }
    }

    if (unsatisfied) {
      // Dependencies not satisfied, put block back on list to process.
      DCHECK(!queued.Contains(rpo_id));
      to_visit.push(rpo_id);
      queued.Add(rpo_id);
    } else {
      // Dependencies satisfied, time to visit the blocks attached to
      // out edges.
      *inserter++ = current;
      satisfied.Add(rpo_id);
      // Successors are enqueued unless already emitted, already queued, or
      // reached via a back edge (a successor that dominates this block).
      BytecodeBasicBlock* nodes[2] = {current->if_true(), current->if_false()};
      for (size_t i = 0; i < 2; i++) {
        if (nodes[i] != nullptr && !satisfied.Contains(nodes[i]->rpo_id()) &&
            !current->is_dominated_by(nodes[i])) {
          if (!queued.Contains(nodes[i]->rpo_id())) {
            to_visit.push(nodes[i]->rpo_id());
            queued.Add(nodes[i]->rpo_id());
          }
        }
      }
    }
  }
  // Every reachable block must have been placed exactly once.
  DCHECK(inserter == dependency_order_.end());
}
+ |
+ |
+} // namespace compiler |
+} // namespace internal |
+} // namespace v8 |