Chromium Code Reviews
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/bytecode-basic-block-analysis.h"

#include "src/interpreter/bytecode-array-iterator.h"

namespace v8 {
namespace internal {
namespace compiler {

namespace {

int GetJumpAbsoluteOffset(const interpreter::BytecodeArrayIterator& iterator) {
  interpreter::Bytecode bytecode = iterator.current_bytecode();
  if (interpreter::Bytecodes::IsJumpImmediate(bytecode)) {
    int relative_offset = iterator.GetImmediateOperand(0);
    return iterator.current_offset() + relative_offset;
  } else if (interpreter::Bytecodes::IsJumpConstant(bytecode)) {
    Smi* smi = Smi::cast(*iterator.GetConstantForIndexOperand(0));
    return iterator.current_offset() + smi->value();
  } else {
    UNREACHABLE();
    return -1;
  }
}

}  // namespace
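
The helper above maps both jump encodings to an absolute target: an immediate operand and a constant-pool Smi are each a relative offset applied to the offset of the jump bytecode itself. A minimal standalone sketch of that arithmetic (hypothetical helper name, not V8 code):

#include <cassert>

// Sketch only: a relative jump operand is added to the offset of the
// jump bytecode itself, for both encodings handled above.
int JumpAbsoluteOffset(int jump_offset, int relative_operand) {
  return jump_offset + relative_operand;
}

int main() {
  assert(JumpAbsoluteOffset(10, 7) == 17);   // forward jump
  assert(JumpAbsoluteOffset(24, -16) == 8);  // backward jump, e.g. a loop
  return 0;
}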


BytecodeBasicBlockAnalysis::BytecodeBasicBlockAnalysis(
    Zone* zone, Handle<BytecodeArray> bytecode_array)
    : zone_(zone),
      block_map_(zone),
      rpo_order_(zone),
      dependency_order_(zone),
      bytecode_array_(bytecode_array) {}


const ZoneVector<BytecodeBasicBlock*>* BytecodeBasicBlockAnalysis::Analyze() {
  CreateBasicBlocks();
  LinkBasicBlocks();
  AssignRpoOrder();
  FindDominators();
  FindDependencyOrder();
  return &dependency_order_;
}


void BytecodeBasicBlockAnalysis::CreateBasicBlock(int start, int end) {
  DCHECK(block_map_.find(start) == block_map_.end());
  BytecodeBasicBlock* new_block =
      new (zone()) BytecodeBasicBlock(zone(), start, end);
  block_map_.insert(std::make_pair(start, new_block));
}


void BytecodeBasicBlockAnalysis::SplitBasicBlock(int offset) {
  // Find the basic block containing offset.
  auto offset_block_pair = block_map_.lower_bound(offset);
  if (offset_block_pair->first > offset) {

rmcilroy 2015/12/08 13:25:07:
nit - comment would be good here

oth 2015/12/09 11:26:45:
Done.

    offset_block_pair--;
  }
  DCHECK(offset_block_pair->first <= offset);

  if (offset_block_pair->first < offset) {

rmcilroy 2015/12/08 13:25:07:
nit - offset_block_pair->first != offset (for clarity)

oth 2015/12/09 11:26:44:
Done.

    // Only split if offset does not point to an existing block start.
    int split_end = offset_block_pair->second->end();
    offset_block_pair->second->set_end(offset);
    CreateBasicBlock(offset, split_end);
  }
}
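
SplitBasicBlock depends on block_map_ being ordered by block start offset: lower_bound(offset) returns the first entry whose start is >= offset, so the method steps back one entry whenever that start is strictly greater than offset. A self-contained illustration with a plain std::map (hypothetical function and data, not V8 code; the end() guard is added for completeness):

#include <cassert>
#include <map>

// Find the start of the block whose [start, end) range contains offset,
// given a map keyed by block start offset.
int ContainingBlockStart(const std::map<int, int>& blocks, int offset) {
  auto it = blocks.lower_bound(offset);                // first start >= offset
  if (it == blocks.end() || it->first > offset) --it;  // step back into range
  return it->first;
}

int main() {
  std::map<int, int> blocks = {{0, 8}, {8, 20}, {20, 31}};  // start -> end
  assert(ContainingBlockStart(blocks, 8) == 8);   // offset at a block start
  assert(ContainingBlockStart(blocks, 13) == 8);  // offset inside [8, 20)
  return 0;
}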


void BytecodeBasicBlockAnalysis::CreateBasicBlocks() {
  interpreter::BytecodeArrayIterator iterator(bytecode_array_);
  ZoneVector<int> split_offsets(zone());

rmcilroy 2015/12/08 13:25:07:
nit - jump_targets would be clearer here IMO

oth 2015/12/09 11:26:45:
Done.


  // Create the exit block.
  CreateBasicBlock(bytecode_array_->length(), kMaxInt);

  // Find and allocate basic blocks in the bytecode.
  while (!iterator.done()) {
    int block_start = iterator.current_offset();
    interpreter::Bytecode bytecode = iterator.current_bytecode();
    while (!interpreter::Bytecodes::IsLocalControlFlow(bytecode) &&
           iterator.current_offset() + iterator.current_size() <
               bytecode_array_->length()) {

rmcilroy 2015/12/08 13:25:07:
Can you not call !iterator.done() instead of "iterator.current_offset() + iterator.current_size() < bytecode_array_->length()"?

oth 2015/12/09 11:26:45:
Yes, flattened into a single loop.

      iterator.Advance();
      bytecode = iterator.current_bytecode();
    }
    CreateBasicBlock(block_start,
                     iterator.current_offset() + iterator.current_size());
    if (interpreter::Bytecodes::IsJump(bytecode)) {
      split_offsets.push_back(GetJumpAbsoluteOffset(iterator));
    }
    iterator.Advance();
  }

  for (auto split_offset = split_offsets.begin();

rmcilroy 2015/12/08 13:25:07:
Comment that you are splitting basic blocks based on jump targets.

oth 2015/12/09 11:26:45:
Done.

       split_offset != split_offsets.end(); split_offset++) {
    SplitBasicBlock(*split_offset);
  }
}
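
CreateBasicBlocks closes each block at the first local-control-flow bytecode (jump or return) and records jump targets so the later splitting pass can cut blocks that a jump lands in the middle of. A toy model of the resulting boundary rule (standalone sketch with hypothetical types, not V8's iterator API): block starts are offset 0, every offset following a terminator, and every jump target.

#include <cassert>
#include <set>
#include <vector>

struct Insn {
  int size;
  bool is_terminator;  // jump or return
  int jump_target;     // absolute offset, or -1 if not a jump
};

std::set<int> BlockStarts(const std::vector<Insn>& code) {
  std::set<int> starts = {0};
  int offset = 0;
  for (const Insn& insn : code) {
    offset += insn.size;
    if (insn.is_terminator) starts.insert(offset);  // block ends here
    if (insn.jump_target >= 0) starts.insert(insn.jump_target);  // split
  }
  return starts;
}

int main() {
  // Offsets: 0 (size 2), 2 (jump back to 0, size 3), 5 (return, size 1).
  std::vector<Insn> code = {{2, false, -1}, {3, true, 0}, {1, true, -1}};
  // Offset 0 is already a start; 5 and 6 follow the two terminators.
  std::set<int> expected = {0, 5, 6};
  assert(BlockStarts(code) == expected);
  return 0;
}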


void BytecodeBasicBlockAnalysis::LinkBasicBlocks() {
  interpreter::BytecodeArrayIterator iterator(bytecode_array_);
  while (!iterator.done()) {
    // Find the last bytecode in each block.
    auto offset_block_pair = block_map_.find(iterator.current_offset());

    DCHECK(offset_block_pair != block_map_.end());
    BytecodeBasicBlock* current_block = offset_block_pair->second;
    DCHECK_NE(current_block, exit());
    while (iterator.current_offset() +
               interpreter::Bytecodes::Size(iterator.current_bytecode()) !=

rmcilroy 2015/12/08 13:25:07:
iterator.current_bytecode_size() ?

oth 2015/12/09 11:26:44:
Removed current_bytecode from BytecodeArrayIterator.

           current_block->end()) {
      DCHECK_LT(iterator.current_offset(), current_block->end());
      iterator.Advance();
    }
    current_block->set_last_bytecode_offset(iterator.current_offset());

    // Connect up links with other blocks.
    interpreter::Bytecode bytecode = iterator.current_bytecode();
    if (interpreter::Bytecodes::IsConditionalJump(bytecode)) {
      int target_offset = GetJumpAbsoluteOffset(iterator);
      current_block->end_with_conditional_jump(
          block_map_[target_offset], block_map_[current_block->end()]);
    } else if (interpreter::Bytecodes::IsJump(bytecode)) {
      int target_offset = GetJumpAbsoluteOffset(iterator);
      current_block->end_with_block(block_map_[target_offset]);
    } else if (!interpreter::Bytecodes::IsLocalControlFlow(bytecode)) {
      current_block->end_with_block(block_map_[current_block->end()]);
    } else {
      DCHECK_EQ(bytecode, interpreter::Bytecode::kReturn);
      current_block->end_with_block(exit());
    }
    iterator.Advance();
  }
}


void BytecodeBasicBlockAnalysis::AssignRpoOrder() {
  // Assign reverse post-order id to aid with finding dominators.
  ZoneStack<BytecodeBasicBlock*> pending(zone());
  ZoneStack<BytecodeBasicBlock*> ordered(zone());

  pending.push(block_map_[0]);
  while (!pending.empty()) {
    BytecodeBasicBlock* current = pending.top();
    pending.pop();
    if (current->rpo_id() != BytecodeBasicBlock::kUnassignedId) {
      continue;
    }

    ordered.push(current);
    current->set_rpo_id(0);
    DCHECK((current->if_true() != nullptr || current->if_false() != nullptr) ||
           current->start() == bytecode_array_->length());

    if (current->if_true() != nullptr &&
        current->if_true()->rpo_id() == BytecodeBasicBlock::kUnassignedId) {
      pending.push(current->if_true());
    }
    if (current->if_false() != nullptr &&
        current->if_false()->rpo_id() == BytecodeBasicBlock::kUnassignedId) {
      pending.push(current->if_false());
    }
  }

  // Unreachable blocks are not assigned an RPO id and need to be pruned.
  // This happens quite rarely, e.g. when all paths in a switch return from
  // the function:
  //   function f(x) {
  //     switch (x) {
  //       case 'a': return 1;
  //       default: return 0;
  //     }
  //     return undefined;  // Potentially inserted by bytecode generator.
  //   }
  if (ordered.size() != block_map_.size()) {
    int unreachable = 0;
    for (auto iter = block_map_.begin(); iter != block_map_.end(); iter++) {
      BytecodeBasicBlock* block = iter->second;
      if (block->incoming_count() == 0 && block->start() > 0) {
        if (block->if_true() != nullptr) {
          auto it = std::find(block->if_true()->incoming_.begin(),
                              block->if_true()->incoming_.end(), block);
          block->if_true()->incoming_.erase(it);
        }
        if (block->if_false() != nullptr) {
          auto it = std::find(block->if_false()->incoming_.begin(),
                              block->if_false()->incoming_.end(), block);
          block->if_false()->incoming_.erase(it);
        }
        unreachable++;
      }
    }
    // Start has no incoming, but this is expected.
    DCHECK_EQ(unreachable, block_map_.size() - ordered.size());
  }

  // Pop post-order entries and back insert into rpo_order_ vector.
  int id = static_cast<int>(ordered.size());
  rpo_order_.resize(ordered.size());
  while (!ordered.empty()) {
    BytecodeBasicBlock* current = ordered.top();
    current->set_rpo_id(--id);
    rpo_order_[id] = current;
    ordered.pop();
  }
}
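
The two explicit stacks above compute a post-order visitation and then number it in reverse. For comparison, the textbook recursive definition that reverse post-order derives from (standalone sketch on a toy adjacency-list CFG, not V8 code):

#include <cassert>
#include <vector>

// Record each node after all of its successors have been visited
// (post-order); reversing that sequence yields RPO.
void PostOrder(const std::vector<std::vector<int>>& succ, int node,
               std::vector<bool>& seen, std::vector<int>& order) {
  seen[node] = true;
  for (int s : succ[node]) {
    if (!seen[s]) PostOrder(succ, s, seen, order);
  }
  order.push_back(node);
}

int main() {
  // Diamond CFG: 0 -> {1, 2}, 1 -> 3, 2 -> 3.
  std::vector<std::vector<int>> succ = {{1, 2}, {3}, {3}, {}};
  std::vector<bool> seen(4, false);
  std::vector<int> order;
  PostOrder(succ, 0, seen, order);
  // In reverse post-order the entry comes first and the merge block last.
  assert(order.back() == 0 && order.front() == 3);
  return 0;
}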


void BytecodeBasicBlockAnalysis::FindDominators() {
  // Naive O(N^2) implementation of dominator finding. Lower-cost
  // algorithms exist, but have additional implementation complexity
  // and higher fixed cost. This typically takes a maximum of 3
  // iterations to converge and in practice N is small.
  DCHECK(!rpo_order_.empty());
  DCHECK(rpo_order_[0]->dominator_rpo_ids() == nullptr);
  for (size_t i = 0; i < rpo_order_.size(); i++) {
    rpo_order_[i]->set_dominator_rpo_ids(
        new (zone()) BitVector(static_cast<int>(rpo_order_.size()), zone()));
    if (i == 0) {

rmcilroy 2015/12/08 13:25:07:
Could you add a comment on what this if/else block is doing?

oth 2015/12/09 11:26:45:
Done and added a reference to the algorithm.

      rpo_order_[i]->dominator_rpo_ids()->Add(0);
    } else {
      rpo_order_[i]->dominator_rpo_ids()->AddAll();
      rpo_order_[i]->dominator_rpo_ids()->Remove(0);
    }
  }

  BitVector accumulator(static_cast<int>(rpo_order_.size()), zone());
  bool changed = true;
  while (changed) {

rmcilroy 2015/12/08 13:25:07:
A couple of comments here would be good.

oth 2015/12/09 11:26:45:
Done.

    changed = false;
    for (size_t i = 1; i < rpo_order_.size(); i++) {
      for (size_t j = 0; j < rpo_order_[i]->incoming()->size(); j++) {
        auto incoming = rpo_order_[i]->incoming()->at(j);
        if (j == 0) {
          accumulator.CopyFrom(*incoming->dominator_rpo_ids());
        } else {
          accumulator.Intersect(*incoming->dominator_rpo_ids());
        }
      }
      accumulator.Add(static_cast<int>(i));
      if (!rpo_order_[i]->dominator_rpo_ids()->Equals(accumulator)) {
        rpo_order_[i]->dominator_rpo_ids()->CopyFrom(accumulator);
        changed = true;
      }
    }
  }
}
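
The fixed-point loop above is the standard iterative dataflow formulation of dominators: Dom(entry) = {entry}, and for every other block n, Dom(n) = {n} ∪ ⋂ Dom(p) over the predecessors p of n, iterated until nothing changes. The same computation on a toy diamond CFG, using std::set in place of BitVector (illustrative sketch only):

#include <cassert>
#include <set>
#include <vector>

int main() {
  // Diamond CFG in RPO ids: 0 -> {1, 2}, 1 -> 3, 2 -> 3.
  std::vector<std::vector<int>> preds = {{}, {0}, {0}, {1, 2}};
  const int n = 4;

  // Initialize: the entry is dominated only by itself, all other blocks
  // provisionally by every block.
  std::set<int> all;
  for (int i = 0; i < n; i++) all.insert(i);
  std::vector<std::set<int>> dom(n, all);
  dom[0] = {0};

  bool changed = true;
  while (changed) {
    changed = false;
    for (int i = 1; i < n; i++) {
      // Intersect the dominator sets of all predecessors...
      std::set<int> acc = dom[preds[i][0]];
      for (size_t j = 1; j < preds[i].size(); j++) {
        std::set<int> tmp;
        for (int d : acc) {
          if (dom[preds[i][j]].count(d)) tmp.insert(d);
        }
        acc = tmp;
      }
      // ... and add the block itself.
      acc.insert(i);
      if (acc != dom[i]) {
        dom[i] = acc;
        changed = true;
      }
    }
  }

  // The merge block 3 is dominated only by the entry and itself.
  assert((dom[3] == std::set<int>{0, 3}));
  return 0;
}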


void BytecodeBasicBlockAnalysis::FindDependencyOrder() {
  // Compute the dependency order of basic blocks based on forward
  // links. In dependency order, all blocks with forward links into a
  // block are satisfied before that block. This simplifies the
  // handling of merges.
  BitVector queued(static_cast<int>(block_map_.size()), zone());
  BitVector satisfied(static_cast<int>(block_map_.size()), zone());
  ZoneQueue<int> to_visit(zone());

  dependency_order_.resize(rpo_order_.size());
  auto inserter = dependency_order_.begin();
  to_visit.push(start()->rpo_id());
  queued.Add(start()->rpo_id());
  while (!to_visit.empty()) {
    int rpo_id = to_visit.front();
    queued.Remove(rpo_id);
    to_visit.pop();
    if (satisfied.Contains(rpo_id)) {
      continue;
    }

    // Check whether incoming edges have satisfied dependencies.
    BytecodeBasicBlock* current = rpo_order_[rpo_id];
    bool unsatisfied = false;
    for (size_t i = 0; i < current->incoming_count(); i++) {
      const BytecodeBasicBlock* incoming = current->incoming(i);
      // Enqueue unsatisfied dependencies from incoming edges provided
      // they are not in the queue already and are not loop
      // headers. The latter are not considered dependencies.
      int incoming_rpo_id = incoming->rpo_id();
      if (!satisfied.Contains(incoming_rpo_id) &&
          !incoming->is_dominated_by(current)) {
        if (!queued.Contains(incoming_rpo_id)) {
          to_visit.push(incoming_rpo_id);
          queued.Add(incoming_rpo_id);
        }
        unsatisfied = true;
      }
    }

    if (unsatisfied) {
      // Dependencies not satisfied, put block back on list to process.
      DCHECK(!queued.Contains(rpo_id));
      to_visit.push(rpo_id);
      queued.Add(rpo_id);
    } else {
      // Dependencies satisfied, time to visit the blocks attached to
      // out edges.
      *inserter++ = current;
      satisfied.Add(rpo_id);
      BytecodeBasicBlock* nodes[2] = {current->if_true(), current->if_false()};
      for (size_t i = 0; i < 2; i++) {
        if (nodes[i] != nullptr && !satisfied.Contains(nodes[i]->rpo_id()) &&
            !current->is_dominated_by(nodes[i])) {
          if (!queued.Contains(nodes[i]->rpo_id())) {
            to_visit.push(nodes[i]->rpo_id());
            queued.Add(nodes[i]->rpo_id());
          }
        }
      }
    }
  }
  DCHECK(inserter == dependency_order_.end());
}
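
The worklist above is essentially a topological sort over forward edges, with loop back-edges excluded via the dominator check (for reducible loops the back-edge source is dominated by the header it jumps to, so it is not treated as a dependency). On a loop-free fragment the effect matches Kahn's algorithm; a minimal sketch under that simplifying no-back-edges assumption (not V8 code):

#include <cassert>
#include <queue>
#include <vector>

int main() {
  // Diamond CFG: 0 -> {1, 2}, 1 -> 3, 2 -> 3. With no back-edges,
  // dependency order reduces to a topological order: the merge block 3
  // is emitted only after both of its predecessors 1 and 2.
  std::vector<std::vector<int>> succ = {{1, 2}, {3}, {3}, {}};
  std::vector<int> pending_preds = {0, 1, 1, 2};  // incoming edge counts

  std::queue<int> ready;
  ready.push(0);
  std::vector<int> order;
  while (!ready.empty()) {
    int block = ready.front();
    ready.pop();
    order.push_back(block);
    for (int s : succ[block]) {
      if (--pending_preds[s] == 0) ready.push(s);  // all incoming satisfied
    }
  }

  assert(order.size() == 4 && order.back() == 3);  // merge block comes last
  return 0;
}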


}  // namespace compiler
}  // namespace internal
}  // namespace v8