Chromium Code Reviews

Unified Diff: src/compiler/register-allocator-verifier.cc

Issue 1855023002: [turbofan] New Register Allocator Validator (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Created 4 years, 8 months ago
Index: src/compiler/register-allocator-verifier.cc
diff --git a/src/compiler/register-allocator-verifier.cc b/src/compiler/register-allocator-verifier.cc
index f2160f52ce7a788530be617526b25f5a7d286740..eddcb3657a6a373ec56067f68aa4482b29d5ea6a 100644
--- a/src/compiler/register-allocator-verifier.cc
+++ b/src/compiler/register-allocator-verifier.cc
@@ -44,39 +44,17 @@ void VerifyAllocatedGaps(const Instruction* instr) {
} // namespace
-
-void RegisterAllocatorVerifier::VerifyInput(
- const OperandConstraint& constraint) {
- CHECK_NE(kSameAsFirst, constraint.type_);
- if (constraint.type_ != kImmediate && constraint.type_ != kExplicit) {
- CHECK_NE(InstructionOperand::kInvalidVirtualRegister,
- constraint.virtual_register_);
- }
-}
-
-
-void RegisterAllocatorVerifier::VerifyTemp(
- const OperandConstraint& constraint) {
- CHECK_NE(kSameAsFirst, constraint.type_);
- CHECK_NE(kImmediate, constraint.type_);
- CHECK_NE(kExplicit, constraint.type_);
- CHECK_NE(kConstant, constraint.type_);
-}
-
-
-void RegisterAllocatorVerifier::VerifyOutput(
- const OperandConstraint& constraint) {
- CHECK_NE(kImmediate, constraint.type_);
- CHECK_NE(kExplicit, constraint.type_);
- CHECK_NE(InstructionOperand::kInvalidVirtualRegister,
- constraint.virtual_register_);
-}
-
-
RegisterAllocatorVerifier::RegisterAllocatorVerifier(
Zone* zone, const RegisterConfiguration* config,
const InstructionSequence* sequence)
- : zone_(zone), config_(config), sequence_(sequence), constraints_(zone) {
+ : zone_(zone),
+ config_(config),
+ sequence_(sequence),
+ constraints_(zone),
+ assessments_(zone),
+ outstanding_assessments_(zone),
+ worklist_(zone),
+ seen_(zone) {
constraints_.reserve(sequence->instructions().size());
// TODO(dcarney): model unique constraints.
// Construct OperandConstraints for all InstructionOperands, eliminating
@@ -111,6 +89,30 @@ RegisterAllocatorVerifier::RegisterAllocatorVerifier(
}
}
+void RegisterAllocatorVerifier::VerifyInput(
+ const OperandConstraint& constraint) {
+ CHECK_NE(kSameAsFirst, constraint.type_);
+ if (constraint.type_ != kImmediate && constraint.type_ != kExplicit) {
+ CHECK_NE(InstructionOperand::kInvalidVirtualRegister,
+ constraint.virtual_register_);
+ }
+}
+
+void RegisterAllocatorVerifier::VerifyTemp(
+ const OperandConstraint& constraint) {
+ CHECK_NE(kSameAsFirst, constraint.type_);
+ CHECK_NE(kImmediate, constraint.type_);
+ CHECK_NE(kExplicit, constraint.type_);
+ CHECK_NE(kConstant, constraint.type_);
+}
+
+void RegisterAllocatorVerifier::VerifyOutput(
+ const OperandConstraint& constraint) {
+ CHECK_NE(kImmediate, constraint.type_);
+ CHECK_NE(kExplicit, constraint.type_);
+ CHECK_NE(InstructionOperand::kInvalidVirtualRegister,
+ constraint.virtual_register_);
+}
void RegisterAllocatorVerifier::VerifyAssignment() {
CHECK(sequence()->instructions().size() == constraints()->size());
@@ -138,7 +140,6 @@ void RegisterAllocatorVerifier::VerifyAssignment() {
}
}
-
void RegisterAllocatorVerifier::BuildConstraint(const InstructionOperand* op,
OperandConstraint* constraint) {
constraint->value_ = kMinInt;
@@ -204,7 +205,6 @@ void RegisterAllocatorVerifier::BuildConstraint(const InstructionOperand* op,
}
}
-
void RegisterAllocatorVerifier::CheckConstraint(
const InstructionOperand* op, const OperandConstraint* constraint) {
switch (constraint->type_) {
@@ -264,457 +264,217 @@ void RegisterAllocatorVerifier::CheckConstraint(
}
}
-namespace {
-
-typedef RpoNumber Rpo;
-
-static const int kInvalidVreg = InstructionOperand::kInvalidVirtualRegister;
-
-struct PhiData : public ZoneObject {
- PhiData(Rpo definition_rpo, const PhiInstruction* phi, int first_pred_vreg,
- const PhiData* first_pred_phi, Zone* zone)
- : definition_rpo(definition_rpo),
- virtual_register(phi->virtual_register()),
- first_pred_vreg(first_pred_vreg),
- first_pred_phi(first_pred_phi),
- operands(zone) {
- operands.reserve(phi->operands().size());
- operands.insert(operands.begin(), phi->operands().begin(),
- phi->operands().end());
- }
- const Rpo definition_rpo;
- const int virtual_register;
- const int first_pred_vreg;
- const PhiData* first_pred_phi;
- IntVector operands;
-};
-
-class PhiMap : public ZoneMap<int, PhiData*>, public ZoneObject {
- public:
- explicit PhiMap(Zone* zone) : ZoneMap<int, PhiData*>(zone) {}
-};
-
-struct OperandLess {
- bool operator()(const InstructionOperand* a,
- const InstructionOperand* b) const {
- return a->CompareCanonicalized(*b);
- }
-};
-
-class OperandMap : public ZoneObject {
- public:
- struct MapValue : public ZoneObject {
- MapValue()
- : incoming(nullptr),
- define_vreg(kInvalidVreg),
- use_vreg(kInvalidVreg),
- succ_vreg(kInvalidVreg) {}
- MapValue* incoming; // value from first predecessor block.
- int define_vreg; // valid if this value was defined in this block.
- int use_vreg; // valid if this value was used in this block.
- int succ_vreg; // valid if propagated back from successor block.
- };
-
- class Map
- : public ZoneMap<const InstructionOperand*, MapValue*, OperandLess> {
- public:
- explicit Map(Zone* zone)
- : ZoneMap<const InstructionOperand*, MapValue*, OperandLess>(zone) {}
-
- // Remove all entries with keys not in other.
- void Intersect(const Map& other) {
- if (this->empty()) return;
- auto it = this->begin();
- OperandLess less;
- for (const std::pair<const InstructionOperand*, MapValue*>& o : other) {
- while (less(it->first, o.first)) {
- this->erase(it++);
- if (it == this->end()) return;
- }
- if (it->first->EqualsCanonicalized(*o.first)) {
- ++it;
- if (it == this->end()) return;
- } else {
- CHECK(less(o.first, it->first));
- }
- }
- }
- };
-
- explicit OperandMap(Zone* zone) : map_(zone) {}
-
- Map& map() { return map_; }
+void BlockAssessments::PerformMoves(const Instruction* instruction) {
+ const ParallelMove* first =
+ instruction->GetParallelMove(Instruction::GapPosition::START);
+ PerformParallelMoves(first);
+ const ParallelMove* last =
+ instruction->GetParallelMove(Instruction::GapPosition::END);
+ PerformParallelMoves(last);
+}
- void RunParallelMoves(Zone* zone, const ParallelMove* moves) {
- // Compute outgoing mappings.
- Map to_insert(zone);
- for (const MoveOperands* move : *moves) {
- if (move->IsEliminated()) continue;
- auto cur = map().find(&move->source());
- CHECK(cur != map().end());
- auto res =
- to_insert.insert(std::make_pair(&move->destination(), cur->second));
- // Ensure injectivity of moves.
- CHECK(res.second);
- }
- // Drop current mappings.
- for (const MoveOperands* move : *moves) {
- if (move->IsEliminated()) continue;
- auto cur = map().find(&move->destination());
- if (cur != map().end()) map().erase(cur);
- }
- // Insert new values.
- map().insert(to_insert.begin(), to_insert.end());
+void BlockAssessments::PerformParallelMoves(const ParallelMove* moves) {
+ if (moves == nullptr) return;
+
+ CHECK(map_for_moves_.empty());
+ for (MoveOperands* move : *moves) {
+ if (move->IsEliminated() || move->IsRedundant()) continue;
+ auto it = map_.find(move->source());
+ // The RHS of a parallel move should have already been assessed.
+ CHECK(it != map_.end());
+ // The LHS of a parallel move should not have been assigned in this
+ // parallel move.
+ CHECK(map_for_moves_.find(move->destination()) == map_for_moves_.end());
+ // Copy the assessment to the destination.
+ map_for_moves_[move->destination()] = it->second;
}
-
- void RunGaps(Zone* zone, const Instruction* instr) {
- for (int i = Instruction::FIRST_GAP_POSITION;
- i <= Instruction::LAST_GAP_POSITION; i++) {
- Instruction::GapPosition inner_pos =
- static_cast<Instruction::GapPosition>(i);
- const ParallelMove* move = instr->GetParallelMove(inner_pos);
- if (move == nullptr) continue;
- RunParallelMoves(zone, move);
- }
+ for (auto pair : map_for_moves_) {
+ map_[pair.first] = pair.second;
}
+ map_for_moves_.clear();
+}
- void Drop(const InstructionOperand* op) {
- auto it = map().find(op);
- if (it != map().end()) map().erase(it);
+void BlockAssessments::DropRegisters() {
+ for (auto iterator = map().begin(), end = map().end(); iterator != end;) {
+ auto current = iterator;
+ ++iterator;
+ InstructionOperand op = current->first;
+ if (op.IsAnyRegister()) map().erase(current);
}
+}
- void DropRegisters(const RegisterConfiguration* config) {
- // TODO(dcarney): sort map by kind and drop range.
- for (auto it = map().begin(); it != map().end();) {
- const InstructionOperand* op = it->first;
- if (op->IsRegister() || op->IsDoubleRegister()) {
- map().erase(it++);
- } else {
- ++it;
+BlockAssessments* RegisterAllocatorVerifier::CreateForBlock(
+ const InstructionBlock* block) {
+ RpoNumber current_block_id = block->rpo_number();
+
+ BlockAssessments* ret = new (zone()) BlockAssessments(zone());
+ if (block->PredecessorCount() == 0) {
+ // TODO(mtrofin): the following check should hold, however, in certain
+ // unit tests it is invalidated by the last block. Investigate and
+ // normalize the CFG.
+ // CHECK(current_block_id.ToInt() == 0);
+ // The phi size test below is because we can, technically, have phi
+ // instructions with one argument. Some tests expose that, too.
+ } else if (block->PredecessorCount() == 1 && block->phis().size() == 0) {
+ const BlockAssessments* prev_block = assessments_[block->predecessors()[0]];
+ ret->CopyFrom(prev_block);
+ } else {
+ for (RpoNumber pred_id : block->predecessors()) {
+ // For every operand coming from any of the predecessors, create an
+ // Unfinalized assessment.
+ auto iterator = assessments_.find(pred_id);
+ if (iterator == assessments_.end()) {
+ // This block is the head of a loop, and this predecessor is the
+ // loopback arc.
+ // Validate this is a loop case, otherwise the CFG is malformed.
+ CHECK(pred_id >= current_block_id);
+ CHECK(block->IsLoopHeader());
+ continue;
+ }
+ const BlockAssessments* pred_assessments = iterator->second;
+ CHECK_NOT_NULL(pred_assessments);
+ for (auto pair : pred_assessments->map()) {
+ InstructionOperand operand = pair.first;
+ if (ret->map().find(operand) == ret->map().end()) {
+ ret->map().insert(std::make_pair(
+ operand, new (zone()) PendingAssessment(block, operand)));
+ }
}
}
}
+ return ret;
+}
- MapValue* Define(Zone* zone, const InstructionOperand* op,
- int virtual_register) {
- MapValue* value = new (zone) MapValue();
- value->define_vreg = virtual_register;
- auto res = map().insert(std::make_pair(op, value));
- if (!res.second) res.first->second = value;
- return value;
- }
-
- void Use(const InstructionOperand* op, int use_vreg, bool initial_pass) {
- auto it = map().find(op);
- CHECK(it != map().end());
- MapValue* v = it->second;
- if (v->define_vreg != kInvalidVreg) {
- CHECK_EQ(v->define_vreg, use_vreg);
- }
- // Already used this vreg in this block.
- if (v->use_vreg != kInvalidVreg) {
- CHECK_EQ(v->use_vreg, use_vreg);
- return;
- }
- if (!initial_pass) {
- // A value may be defined and used in this block or the use must have
- // propagated up.
- if (v->succ_vreg != kInvalidVreg) {
- CHECK_EQ(v->succ_vreg, use_vreg);
- } else {
- CHECK_EQ(v->define_vreg, use_vreg);
+void RegisterAllocatorVerifier::ValidatePendingAssessment(
+ RpoNumber block_id, InstructionOperand op, PendingAssessment* assessment,
+ int virtual_register) {
+ // When validating a pending assessment, it is possible that some of the
+ // assessments for the original operand (the one the assessment was first
+ // created for) are also pending. To avoid recursion, we use a work list.
+ // To deal with cycles, we keep a set of seen nodes.
+ CHECK(worklist_.empty());
+ CHECK(seen_.empty());
+ worklist_.push(std::make_pair(assessment, virtual_register));
+ seen_.insert(block_id);
+
+ while (!worklist_.empty()) {
+ auto work = worklist_.front();
+ PendingAssessment* current_assessment = work.first;
+ int current_virtual_register = work.second;
+ InstructionOperand current_operand = current_assessment->operand();
+ worklist_.pop();
+
+ const InstructionBlock* origin = current_assessment->origin();
+ CHECK(origin->PredecessorCount() > 1 || origin->phis().size() > 0);
+
+ // Check if the virtual register is a phi first, instead of relying on
+ // the incoming assessments. In particular, this handles the case
+ // v1 = phi v0 v0, which structurally is identical to v0 having been
+ // defined at the top of a diamond, and arriving at the node joining the
+ // diamond's branches.
+ const PhiInstruction* phi = nullptr;
+ for (const PhiInstruction* candidate : origin->phis()) {
+ if (candidate->virtual_register() == current_virtual_register) {
+ phi = candidate;
+ break;
}
- // Mark the use.
- it->second->use_vreg = use_vreg;
- return;
}
- // Go up block list and ensure the correct definition is reached.
- for (; v != nullptr; v = v->incoming) {
- // Value unused in block.
- if (v->define_vreg == kInvalidVreg && v->use_vreg == kInvalidVreg) {
- continue;
- }
- // Found correct definition or use.
- CHECK(v->define_vreg == use_vreg || v->use_vreg == use_vreg);
- // Mark the use.
- it->second->use_vreg = use_vreg;
- return;
- }
- // Use of a non-phi value without definition.
- CHECK(false);
- }
- void UsePhi(const InstructionOperand* op, const PhiData* phi,
- bool initial_pass) {
- auto it = map().find(op);
- CHECK(it != map().end());
- MapValue* v = it->second;
- int use_vreg = phi->virtual_register;
- // Phis are not defined.
- CHECK_EQ(kInvalidVreg, v->define_vreg);
- // Already used this vreg in this block.
- if (v->use_vreg != kInvalidVreg) {
- CHECK_EQ(v->use_vreg, use_vreg);
- return;
- }
- if (!initial_pass) {
- // A used phi must have propagated its use to a predecessor.
- CHECK_EQ(v->succ_vreg, use_vreg);
- // Mark the use.
- v->use_vreg = use_vreg;
- return;
- }
- // Go up the block list starting at the first predecessor and ensure this
- // phi has a correct use or definition.
- for (v = v->incoming; v != nullptr; v = v->incoming) {
- // Value unused in block.
- if (v->define_vreg == kInvalidVreg && v->use_vreg == kInvalidVreg) {
- continue;
- }
- // Found correct definition or use.
- if (v->define_vreg != kInvalidVreg) {
- CHECK(v->define_vreg == phi->first_pred_vreg);
- } else if (v->use_vreg != phi->first_pred_vreg) {
- // Walk the phi chain, hunting for a matching phi use.
- const PhiData* p = phi;
- for (; p != nullptr; p = p->first_pred_phi) {
- if (p->virtual_register == v->use_vreg) break;
+ int op_index = 0;
+ for (RpoNumber pred : origin->predecessors()) {
+ int expected =
+ phi != nullptr ? phi->operands()[op_index] : current_virtual_register;
+
+ ++op_index;
+ auto pred_assignment = assessments_.find(pred);
+ if (pred_assignment == assessments_.end()) {
+ CHECK(origin->IsLoopHeader());
+ auto todo_iter = outstanding_assessments_.find(pred);
+ DelayedAssessments* set = nullptr;
+ if (todo_iter == outstanding_assessments_.end()) {
+ set = new (zone()) DelayedAssessments(zone());
+ outstanding_assessments_.insert(std::make_pair(pred, set));
+ } else {
+ set = todo_iter->second;
}
- CHECK(p);
+ set->AddDelayedAssessment(current_operand, expected);
+ continue;
}
- // Mark the use.
- it->second->use_vreg = use_vreg;
- return;
- }
- // Use of a phi value without definition.
- UNREACHABLE();
- }
-
- private:
- Map map_;
- DISALLOW_COPY_AND_ASSIGN(OperandMap);
-};
-} // namespace
+ const BlockAssessments* pred_assessments = pred_assignment->second;
+ auto found_contribution = pred_assessments->map().find(current_operand);
+ CHECK(found_contribution != pred_assessments->map().end());
+ Assessment* contribution = found_contribution->second;
-
-class RegisterAllocatorVerifier::BlockMaps {
- public:
- BlockMaps(Zone* zone, const InstructionSequence* sequence)
- : zone_(zone),
- sequence_(sequence),
- phi_map_guard_(sequence->VirtualRegisterCount(), zone),
- phi_map_(zone),
- incoming_maps_(zone),
- outgoing_maps_(zone) {
- InitializePhis();
- InitializeOperandMaps();
- }
-
- bool IsPhi(int virtual_register) {
- return phi_map_guard_.Contains(virtual_register);
- }
-
- const PhiData* GetPhi(int virtual_register) {
- auto it = phi_map_.find(virtual_register);
- CHECK(it != phi_map_.end());
- return it->second;
- }
-
- OperandMap* InitializeIncoming(size_t block_index, bool initial_pass) {
- return initial_pass ? InitializeFromFirstPredecessor(block_index)
- : InitializeFromIntersection(block_index);
- }
-
- void PropagateUsesBackwards() {
- typedef std::set<size_t, std::greater<size_t>, zone_allocator<size_t>>
- BlockIds;
- BlockIds block_ids((BlockIds::key_compare()),
- zone_allocator<size_t>(zone()));
- // First ensure that incoming contains only keys in all predecessors.
- for (const InstructionBlock* block : sequence()->instruction_blocks()) {
- size_t index = block->rpo_number().ToSize();
- block_ids.insert(index);
- OperandMap::Map& succ_map = incoming_maps_[index]->map();
- for (size_t i = 0; i < block->PredecessorCount(); ++i) {
- RpoNumber pred_rpo = block->predecessors()[i];
- succ_map.Intersect(outgoing_maps_[pred_rpo.ToSize()]->map());
- }
- }
- // Back propagation fixpoint.
- while (!block_ids.empty()) {
- // Pop highest block_id.
- auto block_id_it = block_ids.begin();
- const size_t succ_index = *block_id_it;
- block_ids.erase(block_id_it);
- // Propagate uses back to their definition blocks using succ_vreg.
- const InstructionBlock* block =
- sequence()->instruction_blocks()[succ_index];
- OperandMap::Map& succ_map = incoming_maps_[succ_index]->map();
- for (size_t i = 0; i < block->PredecessorCount(); ++i) {
- for (auto& succ_val : succ_map) {
- // An incoming map contains no defines.
- CHECK_EQ(kInvalidVreg, succ_val.second->define_vreg);
- // Compute succ_vreg.
- int succ_vreg = succ_val.second->succ_vreg;
- if (succ_vreg == kInvalidVreg) {
- succ_vreg = succ_val.second->use_vreg;
- // Initialize succ_vreg in back propagation chain.
- succ_val.second->succ_vreg = succ_vreg;
- }
- if (succ_vreg == kInvalidVreg) continue;
- // May need to transition phi.
- if (IsPhi(succ_vreg)) {
- const PhiData* phi = GetPhi(succ_vreg);
- if (phi->definition_rpo.ToSize() == succ_index) {
- // phi definition block, transition to pred value.
- succ_vreg = phi->operands[i];
- }
- }
- // Push succ_vreg up to all predecessors.
- RpoNumber pred_rpo = block->predecessors()[i];
- OperandMap::Map& pred_map = outgoing_maps_[pred_rpo.ToSize()]->map();
- auto& pred_val = *pred_map.find(succ_val.first);
- if (pred_val.second->use_vreg != kInvalidVreg) {
- CHECK_EQ(succ_vreg, pred_val.second->use_vreg);
- }
- if (pred_val.second->define_vreg != kInvalidVreg) {
- CHECK_EQ(succ_vreg, pred_val.second->define_vreg);
- }
- if (pred_val.second->succ_vreg != kInvalidVreg) {
- if (succ_vreg != pred_val.second->succ_vreg) {
- // When a block introduces 2 identical phis A and B, and both are
- // operands to other phis C and D, and we optimized the moves
- // defining A or B such that they now appear in the block defining
- // A and B, the back propagation will get confused when visiting
- // upwards from C and D. The operand in the block defining A and B
- // will be attributed to C (or D, depending which of these is
- // visited first).
- CHECK(IsPhi(pred_val.second->succ_vreg));
- CHECK(IsPhi(succ_vreg));
- const PhiData* current_phi = GetPhi(succ_vreg);
- const PhiData* assigned_phi = GetPhi(pred_val.second->succ_vreg);
- CHECK_EQ(current_phi->operands.size(),
- assigned_phi->operands.size());
- CHECK_EQ(current_phi->definition_rpo,
- assigned_phi->definition_rpo);
- for (size_t i = 0; i < current_phi->operands.size(); ++i) {
- CHECK_EQ(current_phi->operands[i], assigned_phi->operands[i]);
- }
- }
- } else {
- pred_val.second->succ_vreg = succ_vreg;
- block_ids.insert(pred_rpo.ToSize());
+ switch (contribution->kind()) {
+ case Final:
+ CHECK_EQ(FinalAssessment::cast(contribution)->virtual_register(),
+ expected);
+ break;
+ case Pending: {
+ // This happens if we have a diamond feeding into another one, and
+ // the inner one is never used - other than for carrying the value.
+ PendingAssessment* next = PendingAssessment::cast(contribution);
+ if (seen_.find(pred) == seen_.end()) {
+ worklist_.push({next, expected});
+ seen_.insert(pred);
}
+ // Note that we do not want to finalize pending assessments at the
+ // beginning of a block - which is the information we'd have
+ // available here. This is because this operand may be reused to
+ // define duplicate phis.
+ break;
}
}
}
- // Clear uses and back links for second pass.
- for (OperandMap* operand_map : incoming_maps_) {
- for (auto& succ_val : operand_map->map()) {
- succ_val.second->incoming = nullptr;
- succ_val.second->use_vreg = kInvalidVreg;
- }
- }
- }
-
- private:
- OperandMap* InitializeFromFirstPredecessor(size_t block_index) {
- OperandMap* to_init = outgoing_maps_[block_index];
- CHECK(to_init->map().empty());
- const InstructionBlock* block =
- sequence()->instruction_blocks()[block_index];
- if (block->predecessors().empty()) return to_init;
- size_t predecessor_index = block->predecessors()[0].ToSize();
- // Ensure not a backedge.
- CHECK(predecessor_index < block->rpo_number().ToSize());
- OperandMap* incoming = outgoing_maps_[predecessor_index];
- // Copy map and replace values.
- to_init->map() = incoming->map();
- for (auto& it : to_init->map()) {
- OperandMap::MapValue* incoming = it.second;
- it.second = new (zone()) OperandMap::MapValue();
- it.second->incoming = incoming;
- }
- // Copy to incoming map for second pass.
- incoming_maps_[block_index]->map() = to_init->map();
- return to_init;
- }
-
- OperandMap* InitializeFromIntersection(size_t block_index) {
- return incoming_maps_[block_index];
- }
-
- void InitializeOperandMaps() {
- size_t block_count = sequence()->instruction_blocks().size();
- incoming_maps_.reserve(block_count);
- outgoing_maps_.reserve(block_count);
- for (size_t i = 0; i < block_count; ++i) {
- incoming_maps_.push_back(new (zone()) OperandMap(zone()));
- outgoing_maps_.push_back(new (zone()) OperandMap(zone()));
- }
}
+ seen_.clear();
+ CHECK(worklist_.empty());
+}
- void InitializePhis() {
- const size_t block_count = sequence()->instruction_blocks().size();
- for (size_t block_index = 0; block_index < block_count; ++block_index) {
- const InstructionBlock* block =
- sequence()->instruction_blocks()[block_index];
- for (const PhiInstruction* phi : block->phis()) {
- int first_pred_vreg = phi->operands()[0];
- const PhiData* first_pred_phi = nullptr;
- if (IsPhi(first_pred_vreg)) {
- first_pred_phi = GetPhi(first_pred_vreg);
- first_pred_vreg = first_pred_phi->first_pred_vreg;
- }
- CHECK(!IsPhi(first_pred_vreg));
- PhiData* phi_data = new (zone()) PhiData(
- block->rpo_number(), phi, first_pred_vreg, first_pred_phi, zone());
- auto res =
- phi_map_.insert(std::make_pair(phi->virtual_register(), phi_data));
- CHECK(res.second);
- phi_map_guard_.Add(phi->virtual_register());
- }
+void RegisterAllocatorVerifier::ValidateUse(
+ RpoNumber block_id, BlockAssessments* current_assessments,
+ InstructionOperand op, int virtual_register) {
+ auto iterator = current_assessments->map().find(op);
+ // We should have seen this operand before.
+ CHECK(iterator != current_assessments->map().end());
+ Assessment* assessment = iterator->second;
+
+ switch (assessment->kind()) {
+ case Final:
+ // The virtual registers should match.
+ CHECK_EQ(FinalAssessment::cast(assessment)->virtual_register(),
+ virtual_register);
+ break;
+ case Pending: {
+ ValidatePendingAssessment(
+ block_id, op, PendingAssessment::cast(assessment), virtual_register);
+ // If everything checks out, we may make the assessment.
+ current_assessments->map()[op] =
+ new (zone()) FinalAssessment(virtual_register);
+ break;
}
}
-
- typedef ZoneVector<OperandMap*> OperandMaps;
- typedef ZoneVector<PhiData*> PhiVector;
-
- Zone* zone() const { return zone_; }
- const InstructionSequence* sequence() const { return sequence_; }
-
- Zone* const zone_;
- const InstructionSequence* const sequence_;
- BitVector phi_map_guard_;
- PhiMap phi_map_;
- OperandMaps incoming_maps_;
- OperandMaps outgoing_maps_;
-};
-
-
-void RegisterAllocatorVerifier::VerifyGapMoves() {
- BlockMaps block_maps(zone(), sequence());
- VerifyGapMoves(&block_maps, true);
- block_maps.PropagateUsesBackwards();
- VerifyGapMoves(&block_maps, false);
}
-
-// Compute and verify outgoing values for every block.
-void RegisterAllocatorVerifier::VerifyGapMoves(BlockMaps* block_maps,
- bool initial_pass) {
+void RegisterAllocatorVerifier::VerifyGapMoves() {
+ CHECK(assessments_.empty());
+ CHECK(outstanding_assessments_.empty());
const size_t block_count = sequence()->instruction_blocks().size();
for (size_t block_index = 0; block_index < block_count; ++block_index) {
- OperandMap* current =
- block_maps->InitializeIncoming(block_index, initial_pass);
const InstructionBlock* block =
sequence()->instruction_blocks()[block_index];
+ BlockAssessments* block_assessments = CreateForBlock(block);
+
for (int instr_index = block->code_start(); instr_index < block->code_end();
++instr_index) {
const InstructionConstraint& instr_constraint = constraints_[instr_index];
const Instruction* instr = instr_constraint.instruction_;
- current->RunGaps(zone(), instr);
+ block_assessments->PerformMoves(instr);
+
const OperandConstraint* op_constraints =
instr_constraint.operand_constraints_;
size_t count = 0;
@@ -724,24 +484,19 @@ void RegisterAllocatorVerifier::VerifyGapMoves(BlockMaps* block_maps,
continue;
}
int virtual_register = op_constraints[count].virtual_register_;
- const InstructionOperand* op = instr->InputAt(i);
- if (!block_maps->IsPhi(virtual_register)) {
- current->Use(op, virtual_register, initial_pass);
- } else {
- const PhiData* phi = block_maps->GetPhi(virtual_register);
- current->UsePhi(op, phi, initial_pass);
- }
+ InstructionOperand op = *instr->InputAt(i);
+ ValidateUse(block->rpo_number(), block_assessments, op,
+ virtual_register);
}
for (size_t i = 0; i < instr->TempCount(); ++i, ++count) {
- current->Drop(instr->TempAt(i));
+ block_assessments->Drop(*instr->TempAt(i));
}
if (instr->IsCall()) {
- current->DropRegisters(config());
+ block_assessments->DropRegisters();
}
for (size_t i = 0; i < instr->OutputCount(); ++i, ++count) {
int virtual_register = op_constraints[count].virtual_register_;
- OperandMap::MapValue* value =
- current->Define(zone(), instr->OutputAt(i), virtual_register);
+ block_assessments->AddDefinition(*instr->OutputAt(i), virtual_register);
if (op_constraints[count].type_ == kRegisterAndSlot) {
const AllocatedOperand* reg_op =
AllocatedOperand::cast(instr->OutputAt(i));
@@ -749,13 +504,35 @@ void RegisterAllocatorVerifier::VerifyGapMoves(BlockMaps* block_maps,
const AllocatedOperand* stack_op = AllocatedOperand::New(
zone(), LocationOperand::LocationKind::STACK_SLOT, rep,
op_constraints[i].spilled_slot_);
- auto insert_result =
- current->map().insert(std::make_pair(stack_op, value));
- DCHECK(insert_result.second);
- USE(insert_result);
+ block_assessments->AddDefinition(*stack_op, virtual_register);
}
}
}
+ // Now commit the assessments for this block. If there are any delayed
+ // assessments, ValidatePendingAssessment should see this block, too.
+ assessments_[block->rpo_number()] = block_assessments;
+
+ auto todo_iter = outstanding_assessments_.find(block->rpo_number());
+ if (todo_iter == outstanding_assessments_.end()) continue;
+ DelayedAssessments* todo = todo_iter->second;
+ for (auto pair : todo->map()) {
+ InstructionOperand op = pair.first;
+ int vreg = pair.second;
+ auto found_op = block_assessments->map().find(op);
+ CHECK(found_op != block_assessments->map().end());
+ switch (found_op->second->kind()) {
+ case Final:
+ CHECK(FinalAssessment::cast(found_op->second)->virtual_register() ==
+ vreg);
+ break;
+ case Pending:
+ ValidatePendingAssessment(block->rpo_number(), op,
+ PendingAssessment::cast(found_op->second),
+ vreg);
+ block_assessments->map()[op] = new (zone()) FinalAssessment(vreg);
+ break;
+ }
+ }
}
}
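
For orientation: the new verifier drops the two-pass OperandMap/BlockMaps machinery and instead tracks per-block assessments. At a merge block, an operand arriving from multiple predecessors gets a PendingAssessment, which is only resolved when the operand is actually used, by walking back through the predecessors with a worklist and a seen set (to handle loops and nested merges). The sketch below illustrates that idea in isolation; it is a simplified model, not the CL's code - Block, Assessment and ValidatePending are invented names, and the phi mapping performed by the real ValidatePendingAssessment is omitted.

#include <cassert>
#include <map>
#include <queue>
#include <set>
#include <utility>
#include <vector>

struct Block;

// An operand either carries a known ("final") virtual register, or is
// "pending": it was merged from several predecessors and is only checked
// once it is actually used.
struct Assessment {
  enum Kind { kFinal, kPending } kind;
  int final_vreg;       // valid when kind == kFinal
  const Block* origin;  // the merge block, when kind == kPending
};

struct Block {
  int id;  // stands in for RpoNumber
  std::vector<const Block*> predecessors;
  std::map<int, Assessment> out;  // operand id -> assessment at block end
};

// Check that using `operand` as virtual register `vreg` at a merge block is
// consistent with what every predecessor chain produced. Phi inputs are not
// modeled here; the real verifier maps the expected vreg through them.
bool ValidatePending(const Block* merge_block, int operand, int vreg) {
  std::queue<std::pair<const Block*, int>> worklist;  // (merge block, expected)
  std::set<int> seen;  // block ids already queued, to break cycles
  worklist.push(std::make_pair(merge_block, vreg));
  seen.insert(merge_block->id);

  while (!worklist.empty()) {
    const Block* merge = worklist.front().first;
    int expected = worklist.front().second;
    worklist.pop();
    for (const Block* pred : merge->predecessors) {
      auto it = pred->out.find(operand);
      if (it == pred->out.end()) return false;  // operand never reached pred
      const Assessment& a = it->second;
      if (a.kind == Assessment::kFinal) {
        if (a.final_vreg != expected) return false;  // wrong value flowed in
      } else if (seen.insert(a.origin->id).second) {
        // Another unresolved merge feeds this one; keep walking backwards.
        worklist.push(std::make_pair(a.origin, expected));
      }
    }
  }
  return true;
}

int main() {
  // A diamond: b0 defines operand 7 as vreg 3, b1/b2 pass it through,
  // b3 merges the two branches with a pending assessment.
  Block b0{0, {}, {}}, b1{1, {}, {}}, b2{2, {}, {}}, b3{3, {}, {}};
  b1.predecessors = {&b0};
  b2.predecessors = {&b0};
  b3.predecessors = {&b1, &b2};
  b0.out[7] = {Assessment::kFinal, 3, nullptr};
  b1.out[7] = {Assessment::kFinal, 3, nullptr};
  b2.out[7] = {Assessment::kFinal, 3, nullptr};
  b3.out[7] = {Assessment::kPending, -1, &b3};
  assert(ValidatePending(&b3, 7, 3));   // matches what flowed in
  assert(!ValidatePending(&b3, 7, 4));  // would be a misallocation
  return 0;
}

The worklist plus seen set replaces recursion, so deeply nested diamonds and loop back edges cannot overflow the stack; unresolved loopback predecessors are simply deferred, which is what the CL's outstanding_assessments_ map handles.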
