Chromium Code Reviews

Side by Side Diff: src/compiler/register-allocator.cc

Issue 738853002: [turbofan]: delay ssa deconstruction in register allocator (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: rebase Created 6 years, 1 month ago
 // Copyright 2014 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "src/compiler/linkage.h"
 #include "src/compiler/register-allocator.h"
 #include "src/string-stream.h"

 namespace v8 {
 namespace internal {
(...skipping 82 matching lines...)
   return false;
 }


 #endif


 LiveRange::LiveRange(int id, Zone* zone)
     : id_(id),
       spilled_(false),
-      is_phi_(false),
-      is_non_loop_phi_(false),
       kind_(UNALLOCATED_REGISTERS),
       assigned_register_(kInvalidAssignment),
       last_interval_(NULL),
       first_interval_(NULL),
       first_pos_(NULL),
       parent_(NULL),
       next_(NULL),
       current_interval_(NULL),
       last_processed_use_(NULL),
       current_hint_operand_(NULL),
(...skipping 868 matching lines...)
           gap->GetOrCreateParallelMove(GapInstruction::START, code_zone());
       const ZoneList<MoveOperands>* move_operands = move->move_operands();
       for (int i = 0; i < move_operands->length(); ++i) {
         MoveOperands* cur = &move_operands->at(i);
         if (cur->IsIgnored()) continue;
         InstructionOperand* from = cur->source();
         InstructionOperand* to = cur->destination();
         InstructionOperand* hint = to;
         if (to->IsUnallocated()) {
           int to_vreg = UnallocatedOperand::cast(to)->virtual_register();
-          LiveRange* to_range = LiveRangeFor(to_vreg);
-          if (to_range->is_phi()) {
-            if (to_range->is_non_loop_phi()) {
-              hint = to_range->current_hint_operand();
-            }
+          if (live->Contains(to_vreg)) {
+            Define(curr_position, to, from);
+            live->Remove(to_vreg);
           } else {
-            if (live->Contains(to_vreg)) {
-              Define(curr_position, to, from);
-              live->Remove(to_vreg);
-            } else {
-              cur->Eliminate();
-              continue;
-            }
+            cur->Eliminate();
+            continue;
           }
         } else {
           Define(curr_position, to, from);
         }
         Use(block_start_position, curr_position, from, hint);
         if (from->IsUnallocated()) {
           live->Add(UnallocatedOperand::cast(from)->virtual_register());
         }
       }
     } else {
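
Note on the hunk above: gap moves now feed liveness directly. Walking the block backwards, a move's destination counts as a definition (so it leaves the live set), a move whose destination is not live is simply eliminated, and the move's source becomes live, with the destination kept as the allocation hint. A minimal standalone model of that bookkeeping, using plain ints for virtual registers and a hypothetical Move struct instead of V8's MoveOperands/BitVector types:

#include <cstdio>
#include <set>
#include <vector>

// Hypothetical stand-in for one entry of a parallel move.
struct Move {
  int src;           // source virtual register
  int dst;           // destination virtual register
  bool eliminated;
};

// Backward pass over one gap: destinations are definitions, sources become
// live, and moves into values nobody reads are dropped.
void ProcessParallelMove(std::vector<Move>* moves, std::set<int>* live) {
  for (Move& m : *moves) {
    if (live->count(m.dst)) {
      live->erase(m.dst);      // destination is defined here
    } else {
      m.eliminated = true;     // dead move: no later use of the destination
      continue;
    }
    live->insert(m.src);       // source must be live on entry to the gap
  }
}

int main() {
  std::set<int> live = {10, 11};                               // live after the gap
  std::vector<Move> moves = {{1, 10, false}, {2, 99, false}};  // v10<-v1, v99<-v2
  ProcessParallelMove(&moves, &live);
  for (const Move& m : moves)
    std::printf("v%d <- v%d%s\n", m.dst, m.src, m.eliminated ? " (eliminated)" : "");
  // live is now {1, 11}: v10 was defined, v1 became live, v99's move died.
  return 0;
}

The model only covers the live-set updates; hint propagation and the exact Eliminate() semantics in the real allocator are richer.
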
(...skipping 59 matching lines...)
           }
         }
         Use(block_start_position, curr_position.InstructionEnd(), temp, NULL);
         Define(curr_position, temp, NULL);
       }
     }
   }
 }


-void RegisterAllocator::ResolvePhis(const InstructionBlock* block) {
+void RegisterAllocator::ProcessPhis(const InstructionBlock* block) {
   for (auto phi : block->phis()) {
-    UnallocatedOperand* phi_operand =
-        new (code_zone()) UnallocatedOperand(UnallocatedOperand::NONE);
+    auto output = phi->output();
     int phi_vreg = phi->virtual_register();
-    phi_operand->set_virtual_register(phi_vreg);
-
-    for (size_t i = 0; i < phi->operands().size(); ++i) {
-      UnallocatedOperand* operand =
-          new (code_zone()) UnallocatedOperand(UnallocatedOperand::ANY);
-      operand->set_virtual_register(phi->operands()[i]);
-      InstructionBlock* cur_block =
-          code()->InstructionBlockAt(block->predecessors()[i]);
-      // The gap move must be added without any special processing as in
-      // the AddConstraintsGapMove.
-      code()->AddGapMove(cur_block->last_instruction_index() - 1, operand,
-                         phi_operand);
-
-      Instruction* branch = InstructionAt(cur_block->last_instruction_index());
-      DCHECK(!branch->HasPointerMap());
-      USE(branch);
-    }
-
     LiveRange* live_range = LiveRangeFor(phi_vreg);
     BlockStartInstruction* block_start =
         code()->GetBlockStart(block->rpo_number());
-    block_start->GetOrCreateParallelMove(GapInstruction::START, code_zone())
-        ->AddMove(phi_operand, live_range->GetSpillOperand(), code_zone());
+    block_start->GetOrCreateParallelMove(GapInstruction::BEFORE, code_zone())
+        ->AddMove(output, live_range->GetSpillOperand(), code_zone());
     live_range->SetSpillStartIndex(block->first_instruction_index());
-
-    // We use the phi-ness of some nodes in some later heuristics.
-    live_range->set_is_phi(true);
-    if (!block->IsLoopHeader()) {
-      live_range->set_is_non_loop_phi(true);
-    }
   }
 }


 void RegisterAllocator::MeetRegisterConstraints() {
   for (auto block : code()->instruction_blocks()) {
+    ProcessPhis(block);
     MeetRegisterConstraints(block);
-    if (!AllocationOk()) return;
   }
 }

-
-void RegisterAllocator::ResolvePhis() {
-  // Process the blocks in reverse order.
-  for (auto i = code()->instruction_blocks().rbegin();
-       i != code()->instruction_blocks().rend(); ++i) {
-    ResolvePhis(*i);
-  }
-}
-

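Note on ProcessPhis above: with SSA deconstruction delayed, this pass only connects each phi output to its spill slot at the block start. The per-predecessor gap moves that ResolvePhis used to emit with unallocated operands are now created later, during control-flow resolution, from operands that already carry their assigned locations. A rough standalone sketch of that deferral; Block, Phi, and the location map are illustrative stand-ins, not the V8 data structures:

#include <cstdio>
#include <map>
#include <string>
#include <vector>

// Illustrative model: a phi merges one input vreg per predecessor block.
struct Phi {
  int output;
  std::vector<int> inputs;  // inputs[i] flows in from predecessors[i]
};
struct Block {
  std::vector<int> predecessors;
  std::vector<Phi> phis;
  std::vector<std::string> end_moves;  // moves appended at the block-end gap
};

// After allocation every vreg has a concrete location (register or slot).
using Assignment = std::map<int, std::string>;

// Deferred phi resolution: only now is a move emitted on each incoming edge,
// built from assigned locations instead of unallocated operands.
void ResolvePhisLate(std::vector<Block>& blocks, Block& block,
                     const Assignment& loc) {
  for (const Phi& phi : block.phis) {
    for (size_t i = 0; i < block.predecessors.size(); ++i) {
      Block& pred = blocks[block.predecessors[i]];
      pred.end_moves.push_back(loc.at(phi.output) + " <- " +
                               loc.at(phi.inputs[i]));
    }
  }
}

int main() {
  std::vector<Block> blocks(3);
  blocks[2].predecessors = {0, 1};
  blocks[2].phis = {{30, {10, 20}}};  // v30 = phi(v10 from B0, v20 from B1)
  Assignment loc = {{10, "rax"}, {20, "[sp+8]"}, {30, "rbx"}};
  ResolvePhisLate(blocks, blocks[2], loc);
  for (const std::string& m : blocks[0].end_moves) std::printf("B0: %s\n", m.c_str());
  for (const std::string& m : blocks[1].end_moves) std::printf("B1: %s\n", m.c_str());
  return 0;
}

Building the edge moves only after allocation is what makes the UnallocatedOperand phi_operand plumbing deleted above unnecessary.
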
 ParallelMove* RegisterAllocator::GetConnectingParallelMove(
     LifetimePosition pos) {
   int index = pos.InstructionIndex();
   if (code()->IsGapAt(index)) {
     GapInstruction* gap = code()->GapAt(index);
     return gap->GetOrCreateParallelMove(
         pos.IsInstructionStart() ? GapInstruction::START : GapInstruction::END,
         code_zone());
   }
(...skipping 109 matching lines...)
       if (bound->start_.Value() <= position.Value()) {
         if (position.Value() < bound->end_.Value()) return bound;
         DCHECK(left_index < current_index);
         left_index = current_index;
       } else {
         right_index = current_index;
       }
     }
   }

+  LiveRangeBound* FindPred(const InstructionBlock* pred) {
+    const LifetimePosition pred_end =
+        LifetimePosition::FromInstructionIndex(pred->last_instruction_index());
+    return Find(pred_end);
+  }
+
+  LiveRangeBound* FindSucc(const InstructionBlock* succ) {
+    const LifetimePosition succ_start =
+        LifetimePosition::FromInstructionIndex(succ->first_instruction_index());
+    return Find(succ_start);
+  }
+
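The added FindPred/FindSucc helpers look up which piece of a (possibly split) live range covers the end of a predecessor or the start of a successor; both reduce to the binary search in Find over bounds sorted by start position. A self-contained sketch of that search over half-open [start, end) intervals, with Bound as a simplified stand-in for LiveRangeBound:

#include <cassert>
#include <vector>

// Simplified stand-in for LiveRangeBound: the span of one child range.
struct Bound {
  int start;  // inclusive lifetime position
  int end;    // exclusive lifetime position
};

// Binary search for the bound covering `position`. Bounds are sorted by
// start and non-overlapping, and the caller guarantees a covering bound
// exists (as FindPred/FindSucc do).
const Bound* Find(const std::vector<Bound>& bounds, int position) {
  size_t left = 0;
  size_t right = bounds.size();
  while (true) {
    size_t mid = left + (right - left) / 2;
    const Bound& b = bounds[mid];
    if (b.start <= position) {
      if (position < b.end) return &b;  // covered: done
      left = mid + 1;                   // continue in the right half
    } else {
      right = mid;                      // continue in the left half
    }
  }
}

int main() {
  // Three pieces of one split live range.
  std::vector<Bound> bounds = {{0, 8}, {8, 20}, {24, 40}};
  assert(Find(bounds, 5)->end == 8);    // a FindPred-style query
  assert(Find(bounds, 24)->end == 40);  // a FindSucc-style query
  return 0;
}
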
   void Find(const InstructionBlock* block, const InstructionBlock* pred,
             FindResult* result) const {
     const LifetimePosition pred_end =
         LifetimePosition::FromInstructionIndex(pred->last_instruction_index());
     LiveRangeBound* bound = Find(pred_end);
     result->pred_cover_ = bound->range_;
     const LifetimePosition cur_start = LifetimePosition::FromInstructionIndex(
         block->first_instruction_index());
     // Common case.
     if (bound->CanCover(cur_start)) {
(...skipping 44 matching lines...)
 };

 }  // namespace


 void RegisterAllocator::ResolveControlFlow() {
   // Lazily linearize live ranges in memory for fast lookup.
   LiveRangeFinder finder(*this);
   for (auto block : code()->instruction_blocks()) {
     if (CanEagerlyResolveControlFlow(block)) continue;
+    // resolve phis
+    for (auto phi : block->phis()) {
+      auto* block_bound =
+          finder.ArrayFor(phi->virtual_register())->FindSucc(block);
+      auto phi_output = block_bound->range_->CreateAssignedOperand(code_zone());
+      phi->output()->ConvertTo(phi_output->kind(), phi_output->index());
+      size_t pred_index = 0;
+      for (auto pred : block->predecessors()) {
+        const InstructionBlock* pred_block = code()->InstructionBlockAt(pred);
+        auto* pred_bound =
+            finder.ArrayFor(phi->operands()[pred_index])->FindPred(pred_block);
+        auto pred_op = pred_bound->range_->CreateAssignedOperand(code_zone());
+        phi->inputs()[pred_index] = pred_op;
+        ResolveControlFlow(block, phi_output, pred_block, pred_op);
+        pred_index++;
+      }
+    }
     BitVector* live = live_in_sets_[block->rpo_number().ToInt()];
     BitVector::Iterator iterator(live);
     while (!iterator.Done()) {
-      LiveRangeBoundArray* array = finder.ArrayFor(iterator.Current());
+      auto* array = finder.ArrayFor(iterator.Current());
       for (auto pred : block->predecessors()) {
         FindResult result;
-        const InstructionBlock* pred_block = code()->InstructionBlockAt(pred);
+        const auto* pred_block = code()->InstructionBlockAt(pred);
         array->Find(block, pred_block, &result);
         if (result.cur_cover_ == result.pred_cover_ ||
             result.cur_cover_->IsSpilled())
           continue;
-        ResolveControlFlow(block, result.cur_cover_, pred_block,
-                           result.pred_cover_);
+        InstructionOperand* pred_op =
+            result.pred_cover_->CreateAssignedOperand(code_zone());
+        InstructionOperand* cur_op =
+            result.cur_cover_->CreateAssignedOperand(code_zone());
+        ResolveControlFlow(block, cur_op, pred_block, pred_op);
       }
       iterator.Advance();
     }
   }
 }


 void RegisterAllocator::ResolveControlFlow(const InstructionBlock* block,
-                                           const LiveRange* cur_cover,
+                                           InstructionOperand* cur_op,
                                            const InstructionBlock* pred,
-                                           const LiveRange* pred_cover) {
-  InstructionOperand* pred_op = pred_cover->CreateAssignedOperand(code_zone());
-  InstructionOperand* cur_op = cur_cover->CreateAssignedOperand(code_zone());
-  if (!pred_op->Equals(cur_op)) {
-    GapInstruction* gap = NULL;
-    if (block->PredecessorCount() == 1) {
-      gap = code()->GapAt(block->first_instruction_index());
-    } else {
-      DCHECK(pred->SuccessorCount() == 1);
-      gap = GetLastGap(pred);
+                                           InstructionOperand* pred_op) {
+  if (pred_op->Equals(cur_op)) return;
+  GapInstruction* gap = nullptr;
+  if (block->PredecessorCount() == 1) {
+    gap = code()->GapAt(block->first_instruction_index());
+  } else {
+    DCHECK(pred->SuccessorCount() == 1);
+    gap = GetLastGap(pred);

     Instruction* branch = InstructionAt(pred->last_instruction_index());
     DCHECK(!branch->HasPointerMap());
     USE(branch);
-    }
-    gap->GetOrCreateParallelMove(GapInstruction::START, code_zone())
-        ->AddMove(pred_op, cur_op, code_zone());
   }
+  gap->GetOrCreateParallelMove(GapInstruction::START, code_zone())
+      ->AddMove(pred_op, cur_op, code_zone());
 }
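
The reworked ResolveControlFlow above receives already-assigned operands, so the only remaining decision is where the connecting move goes: the start gap of the block when it has a single predecessor, otherwise the last gap of the predecessor, which must then have exactly one successor (critical edges are assumed to have been split). A small illustrative sketch of just that placement rule, with Edge and GapPosition as made-up types:

#include <cassert>

// Made-up description of one control-flow edge.
struct Edge {
  int pred_successor_count;     // out-degree of the predecessor block
  int block_predecessor_count;  // in-degree of the target block
};

enum class GapPosition { kBlockStartGap, kPredecessorLastGap };

// One predecessor: the move can live at the head of the block. Otherwise it
// must go at the predecessor's tail, which only works if the predecessor has
// a single successor, i.e. the edge is not critical.
GapPosition ChooseGap(const Edge& e) {
  if (e.block_predecessor_count == 1) return GapPosition::kBlockStartGap;
  assert(e.pred_successor_count == 1 && "critical edges must be split");
  return GapPosition::kPredecessorLastGap;
}

int main() {
  Edge single_pred = {2, 1};  // block with one predecessor
  Edge merge = {1, 3};        // merge block fed by a single-successor predecessor
  assert(ChooseGap(single_pred) == GapPosition::kBlockStartGap);
  assert(ChooseGap(merge) == GapPosition::kPredecessorLastGap);
  return 0;
}
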


 void RegisterAllocator::BuildLiveRanges() {
   InitializeLivenessAnalysis();
   // Process the blocks in reverse order.
   for (int block_id = code()->InstructionBlockCount() - 1; block_id >= 0;
        --block_id) {
     const InstructionBlock* block =
         code()->InstructionBlockAt(BasicBlock::RpoNumber::FromInt(block_id));
     BitVector* live = ComputeLiveOut(block);
     // Initially consider all live_out values live for the entire block. We
     // will shorten these intervals if necessary.
     AddInitialIntervals(block, live);

     // Process the instructions in reverse order, generating and killing
     // live values.
     ProcessInstructions(block, live);
     // All phi output operands are killed by this block.
     for (auto phi : block->phis()) {
       // The live range interval already ends at the first instruction of the
       // block.
       int phi_vreg = phi->virtual_register();
       live->Remove(phi_vreg);
-
-      InstructionOperand* hint = NULL;
-      InstructionOperand* phi_operand = NULL;
-      GapInstruction* gap =
-          GetLastGap(code()->InstructionBlockAt(block->predecessors()[0]));
-
-      // TODO(titzer): no need to create the parallel move if it doesn't exit.
-      ParallelMove* move =
-          gap->GetOrCreateParallelMove(GapInstruction::START, code_zone());
-      for (int j = 0; j < move->move_operands()->length(); ++j) {
-        InstructionOperand* to = move->move_operands()->at(j).destination();
-        if (to->IsUnallocated() &&
-            UnallocatedOperand::cast(to)->virtual_register() == phi_vreg) {
-          hint = move->move_operands()->at(j).source();
-          phi_operand = to;
-          break;
-        }
-      }
-      DCHECK(hint != NULL);
-
-      LifetimePosition block_start = LifetimePosition::FromInstructionIndex(
-          block->first_instruction_index());
-      Define(block_start, phi_operand, hint);
     }

     // Now live is live_in for this block except not including values live
     // out on backward successor edges.
     live_in_sets_[block_id] = live;

     if (block->IsLoopHeader()) {
       // Add a live range stretching from the first loop instruction to the last
       // for each value live on entry to the header.
       BitVector::Iterator iterator(live);
(...skipping 24 matching lines...)
       // TODO(bmeurer): This is a horrible hack to make sure that for constant
       // live ranges, every use requires the constant to be in a register.
       // Without this hack, all uses with "any" policy would get the constant
       // operand assigned.
       LiveRange* range = live_ranges_[i];
       if (range->HasAllocatedSpillOperand() &&
           range->GetSpillOperand()->IsConstant()) {
         for (UsePosition* pos = range->first_pos(); pos != NULL;
              pos = pos->next_) {
           pos->register_beneficial_ = true;
-          // TODO(dcarney): should the else case assert requires_reg_ == false?
-          // Can't mark phis as needing a register.
-          if (!code()
-                   ->InstructionAt(pos->pos().InstructionIndex())
-                   ->IsGapMoves()) {
-            pos->requires_reg_ = true;
-          }
+          pos->requires_reg_ = true;
         }
       }
     }
   }
 }


 bool RegisterAllocator::ExistsUseWithoutDefinition() {
   bool found = false;
   BitVector::Iterator iterator(live_in_sets_[0]);
(...skipping 794 matching lines...)
   } else {
     DCHECK(range->Kind() == GENERAL_REGISTERS);
     assigned_registers_->Add(reg);
   }
   range->set_assigned_register(reg, code_zone());
 }

 }

 }  // namespace v8::internal::compiler
