Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(759)

Unified Diff: src/compiler/register-allocator-verifier.cc

Issue 704193007: [turbofan] add gap move verifier (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Created 6 years, 1 month ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View side-by-side diff with in-line comments
Download patch
Index: src/compiler/register-allocator-verifier.cc
diff --git a/src/compiler/register-allocator-verifier.cc b/src/compiler/register-allocator-verifier.cc
index b8aefe1f12647154524928fd79b5f42104b3087e..c59e2ed78c6df68b16302f9b57bac7dc0c186871 100644
--- a/src/compiler/register-allocator-verifier.cc
+++ b/src/compiler/register-allocator-verifier.cc
@@ -14,41 +14,78 @@ static size_t OperandCount(const Instruction* instr) {
}
+static void VerifyGapEmpty(const GapInstruction* gap) {
+ for (int i = GapInstruction::FIRST_INNER_POSITION;
+ i <= GapInstruction::LAST_INNER_POSITION; i++) {
+ GapInstruction::InnerPosition inner_pos =
+ static_cast<GapInstruction::InnerPosition>(i);
+ CHECK_EQ(NULL, gap->GetParallelMove(inner_pos));
+ }
+}
+
+
+void RegisterAllocatorVerifier::VerifyInput(
+ const OperandConstraint& constraint) {
+ CHECK_NE(kSameAsFirst, constraint.type_);
+ if (constraint.type_ != kImmediate) {
+ CHECK_NE(UnallocatedOperand::kInvalidVirtualRegister,
+ constraint.virtual_register_);
+ }
+}
+
+
// Validates the constraint built for a temp operand. Temps are scratch
// locations: they may not use kSameAsFirst, cannot be immediates or
// constants, and never correspond to a virtual register.
void RegisterAllocatorVerifier::VerifyTemp(
    const OperandConstraint& constraint) {
  CHECK_NE(kSameAsFirst, constraint.type_);
  CHECK_NE(kImmediate, constraint.type_);
  CHECK_NE(kConstant, constraint.type_);
  // Temps must have been built with an invalid (absent) virtual register.
  CHECK_EQ(UnallocatedOperand::kInvalidVirtualRegister,
           constraint.virtual_register_);
}
+
+
// Validates the constraint built for an instruction output operand.
// Outputs can never be immediates and must always name a valid virtual
// register (kSameAsFirst outputs are rewritten to the first input's
// constraint before this is called).
void RegisterAllocatorVerifier::VerifyOutput(
    const OperandConstraint& constraint) {
  CHECK_NE(kImmediate, constraint.type_);
  CHECK_NE(UnallocatedOperand::kInvalidVirtualRegister,
           constraint.virtual_register_);
}
+
+
RegisterAllocatorVerifier::RegisterAllocatorVerifier(
- Zone* zone, const InstructionSequence* sequence)
- : sequence_(sequence), constraints_(zone) {
+ Zone* zone, const RegisterConfiguration* config,
+ const InstructionSequence* sequence)
+ : zone_(zone), config_(config), sequence_(sequence), constraints_(zone) {
constraints_.reserve(sequence->instructions().size());
+ // TODO(dcarney): model unique constraints.
+ // Construct OperandConstraints for all InstructionOperands, eliminating
+ // kSameAsFirst along the way.
for (const auto* instr : sequence->instructions()) {
const size_t operand_count = OperandCount(instr);
auto* op_constraints =
zone->NewArray<OperandConstraint>(static_cast<int>(operand_count));
- // Construct OperandConstraints for all InstructionOperands, eliminating
- // kSameAsFirst along the way.
size_t count = 0;
for (size_t i = 0; i < instr->InputCount(); ++i, ++count) {
BuildConstraint(instr->InputAt(i), &op_constraints[count]);
- CHECK_NE(kSameAsFirst, op_constraints[count].type_);
+ VerifyInput(op_constraints[count]);
+ }
+ for (size_t i = 0; i < instr->TempCount(); ++i, ++count) {
+ BuildConstraint(instr->TempAt(i), &op_constraints[count]);
+ VerifyTemp(op_constraints[count]);
}
for (size_t i = 0; i < instr->OutputCount(); ++i, ++count) {
BuildConstraint(instr->OutputAt(i), &op_constraints[count]);
if (op_constraints[count].type_ == kSameAsFirst) {
CHECK(instr->InputCount() > 0);
- op_constraints[count] = op_constraints[0];
+ op_constraints[count].type_ = op_constraints[0].type_;
+ op_constraints[count].value_ = op_constraints[0].value_;
}
- }
- for (size_t i = 0; i < instr->TempCount(); ++i, ++count) {
- BuildConstraint(instr->TempAt(i), &op_constraints[count]);
- CHECK_NE(kSameAsFirst, op_constraints[count].type_);
+ VerifyOutput(op_constraints[count]);
}
// All gaps should be totally unallocated at this point.
if (instr->IsGapMoves()) {
- const auto* gap = GapInstruction::cast(instr);
- for (int i = GapInstruction::FIRST_INNER_POSITION;
- i <= GapInstruction::LAST_INNER_POSITION; i++) {
- GapInstruction::InnerPosition inner_pos =
- static_cast<GapInstruction::InnerPosition>(i);
- CHECK_EQ(NULL, gap->GetParallelMove(inner_pos));
- }
+ CHECK(operand_count == 0);
+ VerifyGapEmpty(GapInstruction::cast(instr));
}
InstructionConstraint instr_constraint = {instr, operand_count,
op_constraints};
@@ -70,12 +107,12 @@ void RegisterAllocatorVerifier::VerifyAssignment() {
for (size_t i = 0; i < instr->InputCount(); ++i, ++count) {
CheckConstraint(instr->InputAt(i), &op_constraints[count]);
}
- for (size_t i = 0; i < instr->OutputCount(); ++i, ++count) {
- CheckConstraint(instr->OutputAt(i), &op_constraints[count]);
- }
for (size_t i = 0; i < instr->TempCount(); ++i, ++count) {
CheckConstraint(instr->TempAt(i), &op_constraints[count]);
}
+ for (size_t i = 0; i < instr->OutputCount(); ++i, ++count) {
+ CheckConstraint(instr->OutputAt(i), &op_constraints[count]);
+ }
++instr_it;
}
}
@@ -84,9 +121,11 @@ void RegisterAllocatorVerifier::VerifyAssignment() {
void RegisterAllocatorVerifier::BuildConstraint(const InstructionOperand* op,
OperandConstraint* constraint) {
constraint->value_ = kMinInt;
+ constraint->virtual_register_ = UnallocatedOperand::kInvalidVirtualRegister;
if (op->IsConstant()) {
constraint->type_ = kConstant;
constraint->value_ = ConstantOperand::cast(op)->index();
+ constraint->virtual_register_ = constraint->value_;
} else if (op->IsImmediate()) {
constraint->type_ = kImmediate;
constraint->value_ = ImmediateOperand::cast(op)->index();
@@ -94,6 +133,7 @@ void RegisterAllocatorVerifier::BuildConstraint(const InstructionOperand* op,
CHECK(op->IsUnallocated());
const auto* unallocated = UnallocatedOperand::cast(op);
int vreg = unallocated->virtual_register();
+ constraint->virtual_register_ = vreg;
if (unallocated->basic_policy() == UnallocatedOperand::FIXED_SLOT) {
constraint->type_ = kFixedSlot;
constraint->value_ = unallocated->fixed_slot_index();
@@ -174,6 +214,194 @@ void RegisterAllocatorVerifier::CheckConstraint(
}
}
+
+namespace {
+
+struct OperandLess {
Jarin 2014/11/12 08:13:35 Just use a function here, no need to use a functor
dcarney 2014/11/12 08:53:31 std::map seemed to want this...
+ bool operator()(const InstructionOperand* a,
+ const InstructionOperand* b) const {
+ if (a->kind() == b->kind()) return a->index() < b->index();
+ return a->kind() < b->kind();
+ }
+};
+
+
+typedef std::map<
+ const InstructionOperand*, int, OperandLess,
+ zone_allocator<std::pair<const InstructionOperand*, const int>>>
+ LocationMap;
+
+
// Tracks, for one basic block, which virtual register currently occupies
// each allocated location (register or stack slot). A mapping is seeded
// from the block's first predecessor and then updated instruction by
// instruction; at the end it describes the block's outgoing state.
// Instances are zone-allocated (ZoneObject).
struct OutgoingMapping : ZoneObject {
  explicit OutgoingMapping(Zone* zone)
      : locations_(LocationMap::key_compare(),
                   LocationMap::allocator_type(zone)) {}

  // Applies the phi moves at the start of |block| for the predecessor with
  // position |index| in the predecessor list: each phi input location must
  // currently hold the phi's operand vreg for that predecessor, and
  // afterwards holds the phi's own vreg.
  void RunPhis(const InstructionBlock* block, const BlockStartInstruction* gap,
               size_t index) {
    // The first moves in the BlockStartInstruction are the phi moves inserted
    // by ResolvePhis.
    const ParallelMove* move = gap->GetParallelMove(GapInstruction::START);
    CHECK_NE(nullptr, move);
    const auto* move_ops = move->move_operands();
    // The move list may contain more than just the phi moves, but never fewer.
    CHECK(block->phis().size() <= static_cast<size_t>(move_ops->length()));
    auto move_it = move_ops->begin();
    for (const auto* phi : block->phis()) {
      const auto* op = move_it->source();
      auto it = locations_.find(op);
      CHECK(it != locations_.end());
      // The source location must hold this predecessor's phi operand.
      CHECK_EQ(it->second, phi->operands()[index]);
      // After the phi, the location carries the phi's virtual register.
      it->second = phi->virtual_register();
      ++move_it;
    }
  }

  // Applies every non-null parallel move of |gap| to the location map.
  void RunGapInstruction(Zone* zone, const GapInstruction* gap) {
    for (int i = GapInstruction::FIRST_INNER_POSITION;
         i <= GapInstruction::LAST_INNER_POSITION; i++) {
      GapInstruction::InnerPosition inner_pos =
          static_cast<GapInstruction::InnerPosition>(i);
      const ParallelMove* move = gap->GetParallelMove(inner_pos);
      if (move == nullptr) continue;
      RunParallelMoves(zone, move);
    }
  }

  // Applies one parallel move. All component moves happen simultaneously,
  // so the new destination mappings are computed from the pre-move state
  // first, then stale destinations are dropped, and only then are the new
  // mappings inserted.
  void RunParallelMoves(Zone* zone, const ParallelMove* move) {
    // Compute outgoing mappings.
    LocationMap to_insert((LocationMap::key_compare()),
                          LocationMap::allocator_type(zone));
    auto* moves = move->move_operands();
    for (auto i = moves->begin(); i != moves->end(); ++i) {
      if (i->IsEliminated()) continue;
      CHECK(i->source()->kind() != InstructionOperand::INVALID);
      // Every live source must already be mapped to some vreg.
      auto cur = locations_.find(i->source());
      CHECK(cur != locations_.end());
      if (i->destination()->kind() == InstructionOperand::INVALID) continue;
      to_insert.insert(std::make_pair(i->destination(), cur->second));
    }
    // Drop current mappings.
    for (auto i = moves->begin(); i != moves->end(); ++i) {
      if (i->IsEliminated()) continue;
      auto cur = locations_.find(i->destination());
      if (cur != locations_.end()) locations_.erase(cur);
    }
    // Insert new values.
    locations_.insert(to_insert.begin(), to_insert.end());
  }

  // Records that location |op| now holds |virtual_register|.
  void Map(const InstructionOperand* op, int virtual_register) {
    locations_.insert(std::make_pair(op, virtual_register));
  }

  // Removes any mapping for |op|; a no-op when |op| is not mapped.
  void Drop(const InstructionOperand* op) {
    auto it = locations_.find(op);
    if (it != locations_.end()) locations_.erase(it);
  }

  // Drops every general and double register mapping — used by the caller
  // when an instruction is a call (see the IsCall handling in
  // VerifyGapMoves), since calls invalidate register contents.
  void DropRegisters(const RegisterConfiguration* config) {
    for (int i = 0; i < config->num_general_registers(); ++i) {
      InstructionOperand op(InstructionOperand::REGISTER, i);
      Drop(&op);
    }
    for (int i = 0; i < config->num_double_registers(); ++i) {
      InstructionOperand op(InstructionOperand::DOUBLE_REGISTER, i);
      Drop(&op);
    }
  }

  // Map from allocated location to the virtual register it currently holds.
  LocationMap locations_;
};
+
+} // namespace
+
+
Jarin 2014/11/12 08:13:35 Could we have a comment describing what we verify
dcarney 2014/11/12 08:53:32 Done.
+void RegisterAllocatorVerifier::VerifyGapMoves() {
+ typedef ZoneVector<OutgoingMapping*> OutgoingMappings;
+ OutgoingMappings outgoing_mappings(
+ static_cast<int>(sequence()->instruction_blocks().size()), nullptr,
+ zone());
+  // Compute the locations of all virtual registers leaving every block, using
+  // only the first predecessor as the input locations.
+ int instr_index = 0;
+ size_t block_index = 0;
+ const auto* block = sequence()->instruction_blocks()[block_index];
+ for (const auto& instr_constraint : *constraints()) {
+ if (block->code_end() == instr_index) {
+ block_index++;
+ block = sequence()->instruction_blocks()[block_index];
+ }
+ auto* outgoing = outgoing_mappings[block_index];
Jarin 2014/11/12 08:13:35 Perhaps rename outgoing => current.
dcarney 2014/11/12 08:53:31 Done.
+ if (outgoing == nullptr) {
+ outgoing = new (zone()) OutgoingMapping(zone());
+ outgoing_mappings[block_index] = outgoing;
+ // Copy outgoing values from predecessor block.
+ if (!block->predecessors().empty()) {
+ size_t predecessor_index = block->predecessors()[0].ToSize();
Jarin 2014/11/12 08:13:35 Could you add the comment here that explains that
dcarney 2014/11/12 08:53:31 Done.
+ CHECK(predecessor_index < block_index);
+ auto* incoming = outgoing_mappings[predecessor_index];
Jarin 2014/11/12 08:13:35 Why is this working with loops? Is it because the
dcarney 2014/11/12 08:53:31 correct
+ if (block->PredecessorCount() > 1) {
+ // Update incoming map with phis. Valid because of edge split form.
Jarin 2014/11/12 08:13:35 Why don't we copy the incoming to the outgoing an
dcarney 2014/11/12 08:53:31 I need to do this to ensure that I can do RunPhis
+ CHECK(sequence()
+ ->instruction_blocks()[predecessor_index]
+ ->SuccessorCount() == 1);
+ const auto* start =
+ BlockStartInstruction::cast(instr_constraint.instruction_);
+ incoming->RunPhis(block, start, 0);
+ }
+ // Now initialize outgoing mapping for this block with incoming mapping.
+ outgoing->locations_.insert(incoming->locations_.begin(),
+ incoming->locations_.end());
+ }
+ }
+ // Update map with gaps and instruction operands.
+ const auto* instr = instr_constraint.instruction_;
+ const auto* op_constraints = instr_constraint.operand_constraints_;
+ size_t count = 0;
+ for (size_t i = 0; i < instr->InputCount(); ++i, ++count) {
+ if (op_constraints[count].type_ == kImmediate) continue;
+ auto it = outgoing->locations_.find(instr->InputAt(i));
+ int virtual_register = op_constraints[count].virtual_register_;
+ CHECK(it != outgoing->locations_.end());
+ CHECK_EQ(it->second, virtual_register);
+ }
+ for (size_t i = 0; i < instr->TempCount(); ++i, ++count) {
+ outgoing->Drop(instr->TempAt(i));
+ }
+ if (instr->IsCall()) {
+ outgoing->DropRegisters(config());
+ }
+ for (size_t i = 0; i < instr->OutputCount(); ++i, ++count) {
+ outgoing->Drop(instr->OutputAt(i));
+ int virtual_register = op_constraints[count].virtual_register_;
+ outgoing->Map(instr->OutputAt(i), virtual_register);
+ }
+ if (instr->IsGapMoves()) {
+ const auto* gap = GapInstruction::cast(instr);
+ outgoing->RunGapInstruction(zone(), gap);
+ }
+ ++instr_index;
+ }
+ CHECK(++block_index == sequence()->instruction_blocks().size());
+ // Run all remaining phis.
+ for (const auto* block : sequence()->instruction_blocks()) {
+ if (block->predecessors().size() <= 1) continue;
+ const auto* start = BlockStartInstruction::cast(
+ sequence()->InstructionAt(block->code_start()));
+ for (size_t pred_index = 1; pred_index < block->predecessors().size();
+ ++pred_index) {
+ size_t pred_block_index = block->predecessors()[pred_index].ToSize();
+ auto* mapping = outgoing_mappings[pred_block_index];
+ CHECK(sequence()
+ ->instruction_blocks()[pred_block_index]
+ ->SuccessorCount() == 1);
+ mapping->RunPhis(block, start, pred_index);
+ // TODO(dcarney): run a verification that this mapping is the same as the
Jarin 2014/11/12 08:13:35 Yeah, it would be nice if we could apply the phis
dcarney 2014/11/12 08:53:31 I don't want to use this register allocator if I c
+ // mapping for the first predecessor w.r.t live values.
+ }
+ }
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8

Powered by Google App Engine
This is Rietveld 408576698