| Index: src/interpreter/bytecode-register-optimizer.cc |
| diff --git a/src/interpreter/bytecode-register-optimizer.cc b/src/interpreter/bytecode-register-optimizer.cc |
| new file mode 100644 |
| index 0000000000000000000000000000000000000000..4bec9a68d4614512eea62aa9bf9161287cfe8cb1 |
| --- /dev/null |
| +++ b/src/interpreter/bytecode-register-optimizer.cc |
| @@ -0,0 +1,561 @@ |
| +// Copyright 2015 the V8 project authors. All rights reserved. |
|
rmcilroy
2016/05/26 10:24:53
2016
oth
2016/05/26 21:26:51
Done.
|
| +// Use of this source code is governed by a BSD-style license that can be |
| +// found in the LICENSE file. |
| + |
| +#include "src/interpreter/bytecode-register-optimizer.h" |
| + |
| +namespace v8 { |
| +namespace internal { |
| +namespace interpreter { |
| + |
| +// A class for tracking the state of a register. In particular, it |
| +// tracks whether one register is valuewise equivalent to another, and |
| +// also whether a register is materialized in the bytecode stream. |
| +class BytecodeRegisterOptimizer::RegisterInfo final : public ZoneObject { |
| + public: |
| + RegisterInfo(Register reg, int equivalence_id, bool materialized) |
| + : register_(reg), |
| + equivalence_id_(equivalence_id), |
| + materialized_(materialized), |
| + next_(this), |
| + prev_(this) {} |
| + |
| + void AddToEquivalenceSetOf(RegisterInfo* info); |
| + void RemoveFromEquivalenceSet(); |
| + bool IsOnlyEquivalent() const; |
| + bool IsOnlyMaterializedEquivalent() const; |
| + bool IsEquivalent(RegisterInfo* info) const; |
| + |
| + RegisterInfo* GetMaterializedEquivalent(); |
| + RegisterInfo* GetMaterializedEquivalentOtherThan(Register reg); |
| + RegisterInfo* GetMaterializationCandidate(); |
|
rmcilroy
2016/05/26 10:24:52
I think these functions deserve a comment, particu
oth
2016/05/26 21:26:51
Done.
|
| + |
| + Register register_value() const { return register_; } |
| + bool materialized() const { return materialized_; } |
| + void set_materialized(bool materialized) { materialized_ = materialized; } |
| + void set_equivalence_id(int equivalence_id) { |
| + equivalence_id_ = equivalence_id; |
| + } |
| + int equivalence_id() const { return equivalence_id_; } |
| + RegisterInfo* next() const { return next_; } |
| + |
| + private: |
| + Register register_; |
| + int equivalence_id_; |
|
rmcilroy
2016/05/26 10:24:53
This should probably be an unsigned long, and mayb
oth
2016/05/26 21:26:50
Done.
|
| + bool materialized_; |
| + |
| + // Equivalence set pointers. |
| + RegisterInfo* next_; |
| + RegisterInfo* prev_; |
| + |
| + DISALLOW_COPY_AND_ASSIGN(RegisterInfo); |
| +}; |
| + |
| +void BytecodeRegisterOptimizer::RegisterInfo::AddToEquivalenceSetOf( |
| + RegisterInfo* info) { |
| + DCHECK(!IsEquivalent(info) && IsOnlyEquivalent()); |
| + this->next_ = info->next_; |
| + this->prev_ = info; |
| + this->prev_->next_ = this; |
| + this->next_->prev_ = this; |
| + set_equivalence_id(info->equivalence_id()); |
| +} |
| + |
| +void BytecodeRegisterOptimizer::RegisterInfo::RemoveFromEquivalenceSet() { |
| + this->next_->prev_ = this->prev_; |
| + this->prev_->next_ = this->next_; |
| + this->next_ = this->prev_ = this; |
| +} |
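
The two functions above splice a RegisterInfo into, and out of, a circular doubly-linked list, so every member of an equivalence set is reachable from every other by following next_. A minimal standalone sketch of the same operations (hypothetical Node type, not part of this patch):

    #include <cassert>

    struct Node {
      Node* next;
      Node* prev;
      Node() : next(this), prev(this) {}  // a fresh node is its own singleton set

      // Mirrors AddToEquivalenceSetOf(): splice |this| in directly after |other|.
      void AddToSetOf(Node* other) {
        next = other->next;
        prev = other;
        prev->next = this;
        next->prev = this;
      }

      // Mirrors RemoveFromEquivalenceSet(): unlink and become a singleton again.
      void RemoveFromSet() {
        next->prev = prev;
        prev->next = next;
        next = prev = this;
      }
    };

    int main() {
      Node a, b, c;
      b.AddToSetOf(&a);   // {a, b}
      c.AddToSetOf(&a);   // {a, c, b} in next order
      assert(a.next == &c && c.next == &b && b.next == &a);
      b.RemoveFromSet();  // back to {a, c} and {b}
      assert(a.next == &c && c.next == &a && b.next == &b);
      return 0;
    }
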
| + |
| +bool BytecodeRegisterOptimizer::RegisterInfo::IsOnlyEquivalent() const { |
|
rmcilroy
2016/05/26 10:24:53
This name is a bit confusing to me, how about IsUn
oth
2016/05/26 21:26:51
Agreed. For readability, perhaps IsOnlyMemberOfEqu
rmcilroy
2016/05/27 09:18:58
SGTM, thanks.
|
| + return this->next_ == this; |
| +} |
| + |
| +bool BytecodeRegisterOptimizer::RegisterInfo::IsOnlyMaterializedEquivalent() |
| + const { |
| + DCHECK(materialized()); |
| + |
| + const RegisterInfo* visitor = this->next_; |
| + while (visitor != this) { |
| + if (visitor->materialized()) { |
| + return false; |
| + } |
| + visitor = visitor->next_; |
| + } |
| + return true; |
| +} |
| + |
| +bool BytecodeRegisterOptimizer::RegisterInfo::IsEquivalent( |
| + RegisterInfo* info) const { |
| + return equivalence_id() == info->equivalence_id(); |
| +} |
| + |
| +BytecodeRegisterOptimizer::RegisterInfo* |
| +BytecodeRegisterOptimizer::RegisterInfo::GetMaterializedEquivalent() { |
| + RegisterInfo* visitor = this; |
| + do { |
| + if (visitor->materialized()) { |
| + return visitor; |
| + } |
| + visitor = visitor->next_; |
| + } while (visitor != this); |
| + |
| + return nullptr; |
| +} |
| + |
| +BytecodeRegisterOptimizer::RegisterInfo* |
| +BytecodeRegisterOptimizer::RegisterInfo::GetMaterializationCandidate() { |
|
rmcilroy
2016/05/26 10:24:53
It is difficult to figure out what this function d
oth
2016/05/26 21:26:50
GetEquivalentToMaterialize()?
rmcilroy
2016/05/27 09:18:58
SGTM, thanks.
|
| + DCHECK(this->materialized()); |
| + RegisterInfo* visitor = this->next_; |
| + Register best_reg; |
|
rmcilroy
2016/05/26 10:24:53
unused
oth
2016/05/26 21:26:51
Done.
|
| + RegisterInfo* best_info = nullptr; |
| + while (visitor != this) { |
| + if (visitor->materialized()) { |
| + return nullptr; |
| + } |
| + if (best_info == nullptr || |
| + visitor->register_value() < best_info->register_value()) { |
| + best_info = visitor; |
| + } |
| + visitor = visitor->next_; |
| + } |
| + return best_info; |
| +} |
| + |
| +BytecodeRegisterOptimizer::BytecodeRegisterOptimizer( |
| + Zone* zone, TemporaryRegisterAllocator* register_allocator, |
| + int parameter_count, int fixed_register_count, |
| + BytecodePipelineStage* next_stage) |
| + : accumulator_(Register::bytecode_array()), |
|
rmcilroy
2016/05/26 10:24:52
We don't use bytecode_array in bytecode, but I won
oth
2016/05/26 21:26:51
Done.
It was implemented in this way to keep the
|
| + temporary_base_(register_allocator->allocation_base()), |
| + register_map_(zone), |
| + equivalence_id_(0), |
| + next_stage_(next_stage), |
| + flushed_(false), |
| + zone_(zone) { |
| + register_allocator->set_observer(this); |
| + |
| + // Calculate offset so register index values can be mapped into |
| + // a vector of register metadata. |
| + if (parameter_count != 0) { |
| + map_index_offset_ = |
| + -Register::FromParameterIndex(0, parameter_count).index(); |
| + } else { |
| + map_index_offset_ = -accumulator_.index(); |
|
rmcilroy
2016/05/26 10:24:53
Does this side ever get hit? We should always have
oth
2016/05/26 21:26:50
Yes, it gets hit in tests that craft bytecode.
Th
rmcilroy
2016/05/27 09:18:58
SG, thanks
|
| + } |
| + |
| + // Initialize register map for parameters, locals, and the |
| + // accumulator. |
| + register_map_.resize(map_index_offset_ + |
| + static_cast<size_t>(fixed_register_count)); |
|
rmcilroy
2016/05/26 10:24:53
Isn't fixed_register_count the same (or less than)
oth
2016/05/26 21:26:50
Done.
|
| + for (size_t i = 0; i < register_map_.size(); ++i) { |
| + register_map_[i] = new (zone) |
| + RegisterInfo(RegisterFromMapIndex(i), equivalence_id_++, true); |
| + DCHECK_EQ(register_map_[i]->register_value().index(), |
| + RegisterFromMapIndex(i).index()); |
| + } |
| + accumulator_info_ = GetRegisterInfo(accumulator_); |
| + DCHECK(accumulator_info_->register_value() == accumulator_); |
| +} |
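
GetMapIndex() and RegisterFromMapIndex() are declared in the header; given how map_index_offset_ is computed above, they are presumably simple offset conversions between (possibly negative) register indices and vector indices. A hedged sketch, bodies assumed:

    // Hedged sketch; the real definitions are expected in the header.
    // map_index_offset_ shifts the (negative) parameter register indices so
    // that every tracked register maps to a non-negative vector index.
    size_t BytecodeRegisterOptimizer::GetMapIndex(Register reg) const {
      return static_cast<size_t>(reg.index() + map_index_offset_);
    }

    Register BytecodeRegisterOptimizer::RegisterFromMapIndex(size_t index) const {
      return Register(static_cast<int>(index) - map_index_offset_);
    }
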
| + |
| +void BytecodeRegisterOptimizer::FlushState() { |
| + if (flushed_) { |
| + return; |
| + } |
| + |
| + // Materialize all live registers. |
| + size_t count = register_map_.size(); |
| + for (size_t i = 0; i < count; ++i) { |
| + RegisterInfo* reg_info = register_map_[i]; |
| + if (!reg_info->materialized() && !reg_info->IsOnlyEquivalent()) { |
| + DCHECK(RegisterIsTemporary(reg_info->register_value()) || |
| + reg_info->register_value() == accumulator_); |
| + Materialize(reg_info); |
| + } |
| + } |
| + |
| + // Break all existing equivalences. |
| + for (size_t i = 0; i < count; ++i) { |
| + RegisterInfo* reg_info = register_map_[i]; |
| + if (!reg_info->IsOnlyEquivalent()) { |
| + reg_info->RemoveFromEquivalenceSet(); |
| + reg_info->set_equivalence_id(equivalence_id_++); |
|
rmcilroy
2016/05/26 10:24:53
nit - move the setting of equivalence_id into Remo
oth
2016/05/26 21:26:50
Done.
|
| + } |
| + } |
| + flushed_ = true; |
| +} |
| + |
| +// override |
| +void BytecodeRegisterOptimizer::FlushBasicBlock() { |
| + FlushState(); |
| + next_stage_->FlushBasicBlock(); |
| +} |
| + |
| +// override |
| +size_t BytecodeRegisterOptimizer::FlushForOffset() { |
| + FlushState(); |
| + return next_stage_->FlushForOffset(); |
| +} |
| + |
| +void BytecodeRegisterOptimizer::EmitNopTakingSourcePosition( |
| + BytecodeNode* node) { |
| + BytecodeNode nop(Bytecode::kNop); |
| + nop.source_info().Update(node->source_info()); |
| + WriteToNextStage(&nop); |
| + node->source_info().set_invalid(); |
| +} |
| + |
| +// override |
| +void BytecodeRegisterOptimizer::Write(BytecodeNode* node) { |
| + flushed_ = false; |
| + |
| + // |
| + // The logic for register to register transfer here emits Nops with the |
| + // current source position. |
|
rmcilroy
2016/05/26 10:24:53
I'm not sure what this adds anything beyond the tw
oth
2016/05/26 21:26:50
Done.
|
| + // |
| + // Transfers with observable registers as the destination will be |
| + // immediately materialized so the source position information will |
| + // be ordered correctly. |
| + // |
| + // Transfers without observable destination registers will initially |
| + // be emitted as Nops with the source position. They may, or may |
| + // not, be materialized by the optimizer. However, the source |
| + // position is not lost and being attachd to a Nop is fine as the |
|
rmcilroy
2016/05/26 10:24:53
attached
oth
2016/05/26 21:26:51
Done.
|
| + // destination register is not observable in the debugger. |
| + // |
| + switch (node->bytecode()) { |
| + case Bytecode::kLdar: { |
| + EmitNopTakingSourcePosition(node); |
|
rmcilroy
2016/05/26 10:24:53
nit - move into DoLdar / DoStar / DoMov.
Optional
oth
2016/05/26 21:26:50
Done. Tried the experiment with the source positio
|
| + DoLdar(node); |
| + return; |
| + } |
| + case Bytecode::kStar: { |
| + EmitNopTakingSourcePosition(node); |
| + DoStar(node); |
| + return; |
| + } |
| + case Bytecode::kMov: { |
| + EmitNopTakingSourcePosition(node); |
| + DoMov(node); |
| + return; |
| + } |
| + default: |
| + break; |
| + } |
| + |
| + if (Bytecodes::IsJump(node->bytecode()) || |
| + node->bytecode() == Bytecode::kDebugger) { |
| + // The debugger can manipulate locals and parameters, so flush |
| + // everything before handing over to it. Similarly, all state must |
| + // be flushed before emitting a jump due to how bytecode offsets |
| + // for jumps are evaluated. |
| + FlushState(); |
| + WriteToNextStage(node); |
| + return; |
|
rmcilroy
2016/05/26 10:24:53
nit - just do the FlushState here and fall-through
oth
2016/05/26 21:26:51
Done.
|
| + } |
| + |
| + PrepareOperands(node); |
| + WriteToNextStage(node); |
| +} |
| + |
| +void BytecodeRegisterOptimizer::WriteToNextStage(BytecodeNode* node) { |
| + next_stage_->Write(node); |
| +} |
| + |
| +void BytecodeRegisterOptimizer::OutputRegisterTransfer( |
| + RegisterInfo* input_info, RegisterInfo* output_info) { |
| + Register input = input_info->register_value(); |
| + Register output = output_info->register_value(); |
| + DCHECK_NE(input.index(), output.index()); |
| + |
| + if (input == accumulator_) { |
| + uint32_t operand = static_cast<uint32_t>(output.ToOperand()); |
| + OperandScale scale = Bytecodes::OperandSizesToScale(output.SizeOfOperand()); |
| + BytecodeNode node(Bytecode::kStar, operand, scale); |
| + WriteToNextStage(&node); |
| + } else if (output == accumulator_) { |
| + uint32_t operand = static_cast<uint32_t>(input.ToOperand()); |
| + OperandScale scale = Bytecodes::OperandSizesToScale(input.SizeOfOperand()); |
| + BytecodeNode node(Bytecode::kLdar, operand, scale); |
| + WriteToNextStage(&node); |
| + } else { |
| + uint32_t operand0 = static_cast<uint32_t>(input.ToOperand()); |
| + uint32_t operand1 = static_cast<uint32_t>(output.ToOperand()); |
| + OperandScale scale = Bytecodes::OperandSizesToScale(input.SizeOfOperand(), |
| + output.SizeOfOperand()); |
| + BytecodeNode node(Bytecode::kMov, operand0, operand1, scale); |
| + WriteToNextStage(&node); |
| + } |
| +} |
| + |
| +void BytecodeRegisterOptimizer::CreateMaterializedEquivalentIfRequired( |
| + RegisterInfo* info) { |
| + if (info->materialized()) { |
| + RegisterInfo* unmaterialized = info->GetMaterializationCandidate(); |
| + if (unmaterialized) { |
| + OutputRegisterTransfer(info, unmaterialized); |
| + unmaterialized->set_materialized(true); |
| + } |
| + } |
| +} |
| + |
| +BytecodeRegisterOptimizer::RegisterInfo* |
| +BytecodeRegisterOptimizer::GetMaterializedEquivalent(RegisterInfo* info) { |
| + return info->materialized() ? info : info->GetMaterializedEquivalent(); |
| +} |
| + |
| +BytecodeRegisterOptimizer::RegisterInfo* |
| +BytecodeRegisterOptimizer::GetMaterializedEquivalentNotAccumulator( |
| + RegisterInfo* info) { |
| + if (info->materialized()) { |
| + return info; |
| + } |
| + |
| + RegisterInfo* result = info->GetMaterializedEquivalent(); |
|
rmcilroy
2016/05/26 10:24:53
Maybe just add an option to GetMaterializedEquival
oth
2016/05/26 21:26:51
Done.
|
| + if (result->register_value() == accumulator_) { |
| + result = result->next()->GetMaterializedEquivalent(); |
| + if (result->register_value() == accumulator_) { |
| + Materialize(info); |
| + result = info; |
| + } |
| + } |
| + return result; |
| +} |
| + |
| +void BytecodeRegisterOptimizer::Materialize(RegisterInfo* info) { |
| + if (!info->materialized()) { |
| + RegisterInfo* materialized = info->GetMaterializedEquivalent(); |
| + OutputRegisterTransfer(materialized, info); |
| + info->set_materialized(true); |
| + } |
| +} |
| + |
| +void BytecodeRegisterOptimizer::RegisterTransfer(RegisterInfo* input_info, |
| + RegisterInfo* output_info) { |
| + // Ensure value associated with output is materialized into a |
| + // different register if necessary. |
| + CreateMaterializedEquivalentIfRequired(output_info); |
| + bool output_is_observable = |
| + RegisterIsObservable(output_info->register_value()); |
| + |
| + // If the output and input are not equivalent or the output is |
| + // visible to the user/debugger, update equivalence set and |
| + // potentially emit store if the transfer is observable. |
| + if (!output_info->IsEquivalent(input_info) || output_is_observable) { |
| + // Remove from current set and invalidate equivalence id. |
| + output_info->RemoveFromEquivalenceSet(); |
| + output_info->set_equivalence_id(equivalence_id_++); |
| + |
| + // Add to the equivalence set of |input_info|. |
| + RegisterInfo* materialized_info = input_info->GetMaterializedEquivalent(); |
| + output_info->AddToEquivalenceSetOf(materialized_info); |
| + output_info->set_materialized(output_is_observable); |
| + if (output_is_observable) { |
| + OutputRegisterTransfer(materialized_info, output_info); |
|
rmcilroy
2016/05/26 10:24:53
output_info should be set_materialized(true) shoul
oth
2016/05/26 21:26:51
Re-worked hopefully easier to reason about now ?
rmcilroy
2016/05/27 09:18:58
Looks good, thanks
|
| + } |
| + } |
| +} |
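
RegisterIsTemporary() and RegisterIsObservable() are also header-side helpers; from the comments above, the intent appears to be that temporaries sit at or above temporary_base_ and that only parameters and fixed locals are visible to the user/debugger. A hedged sketch, bodies assumed:

    // Hedged sketch; bodies assumed, declarations expected in the header.
    bool BytecodeRegisterOptimizer::RegisterIsTemporary(Register reg) const {
      // Temporaries are allocated at or above the temporary allocation base.
      return reg.index() >= temporary_base_.index();
    }

    bool BytecodeRegisterOptimizer::RegisterIsObservable(Register reg) const {
      // Parameters and fixed locals are visible to the user/debugger; the
      // accumulator and temporary registers are not.
      if (reg == accumulator_) return false;
      return !RegisterIsTemporary(reg);
    }
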
| + |
| +void BytecodeRegisterOptimizer::DoLdar(const BytecodeNode* const node) { |
| + Register input = GetRegisterInputOperand( |
| + 0, node->bytecode(), node->operands(), node->operand_count()); |
| + RegisterInfo* input_info = GetRegisterInfo(input); |
| + RegisterTransfer(input_info, accumulator_info_); |
| +} |
| + |
| +void BytecodeRegisterOptimizer::DoMov(const BytecodeNode* const node) { |
| + Register input = GetRegisterInputOperand( |
| + 0, node->bytecode(), node->operands(), node->operand_count()); |
| + RegisterInfo* input_info = GetRegisterInfo(input); |
| + Register output = GetRegisterOutputOperand( |
| + 1, node->bytecode(), node->operands(), node->operand_count()); |
| + RegisterInfo* output_info = GetOrCreateRegisterInfo(output); |
| + RegisterTransfer(input_info, output_info); |
| +} |
| + |
| +void BytecodeRegisterOptimizer::DoStar(const BytecodeNode* const node) { |
| + Register output = GetRegisterOutputOperand( |
| + 0, node->bytecode(), node->operands(), node->operand_count()); |
| + RegisterInfo* output_info = GetOrCreateRegisterInfo(output); |
| + RegisterTransfer(accumulator_info_, output_info); |
| +} |
| + |
| +void BytecodeRegisterOptimizer::PrepareRegisterOutputOperand( |
| + RegisterInfo* reg_info) { |
| + CreateMaterializedEquivalentIfRequired(reg_info); |
| + |
| + // Put the register in its own equivalence set. |
| + reg_info->RemoveFromEquivalenceSet(); |
| + reg_info->set_materialized(true); |
| + reg_info->set_equivalence_id(equivalence_id_++); |
| +} |
| + |
| +void BytecodeRegisterOptimizer::PrepareRegisterRangeOutputOperand( |
| + Register start, int count) { |
| + for (int i = 0; i < count; ++i) { |
| + Register reg(start.index() + i); |
| + RegisterInfo* reg_info = GetOrCreateRegisterInfo(reg); |
| + PrepareRegisterOutputOperand(reg_info); |
| + } |
| +} |
| + |
| +Register BytecodeRegisterOptimizer::PrepareRegisterInputOperand(Register reg) { |
| + RegisterInfo* reg_info = GetOrCreateRegisterInfo(reg); |
|
rmcilroy
2016/05/26 10:24:53
Reginfo should be create here shouldn't it? Just G
oth
2016/05/26 21:26:51
Comment added. The create case is for the first ti
|
| + if (reg_info->materialized()) { |
| + return reg; |
| + } else { |
| + RegisterInfo* equivalent_info = |
| + GetMaterializedEquivalentNotAccumulator(reg_info); |
| + return equivalent_info->register_value(); |
| + } |
| +} |
| + |
| +void BytecodeRegisterOptimizer::PrepareRegisterRangeInputOperand(Register start, |
| + int count) { |
| + for (int i = 0; i < count; ++i) { |
| + Register current(start.index() + i); |
| + RegisterInfo* input_info = GetRegisterInfo(current); |
| + Materialize(input_info); |
| + } |
| +} |
| + |
| +void BytecodeRegisterOptimizer::PrepareRegisterOperands( |
| + BytecodeNode* const node) { |
| + // |
| + // For each input operand, get a materialized equivalent if it is |
| + // just a single register, otherwise materialize register range. |
| + // Update operand_scale if necessary. |
| + // |
| + // For each output register about to be clobbered, materialize an |
| + // equivalent if it exists. Put each register in its own equivalence set. |
| + // |
| + int register_operand_bitmap = |
| + Bytecodes::GetRegisterOperandBitmap(node->bytecode()); |
| + const OperandType* operand_types = |
| + Bytecodes::GetOperandTypes(node->bytecode()); |
| + uint32_t* operands = node->operands(); |
| + for (int i = 0; register_operand_bitmap != 0; |
| + ++i, register_operand_bitmap >>= 1) { |
| + if ((register_operand_bitmap & 1) == 0) { |
| + continue; |
| + } |
| + OperandType operand_type = operand_types[i]; |
| + int count = 0; |
| + if (operand_types[i + 1] == OperandType::kRegCount) { |
|
rmcilroy
2016/05/26 10:24:53
This is repeated in a bunch of places (e.g., bytec
oth
2016/05/26 21:26:51
This is a little bit raw. If it's a helper, it'll
|
| + count = static_cast<int>(operands[i + 1]); |
| + if (count == 0) { |
| + continue; |
| + } |
| + } else { |
| + count = Bytecodes::GetNumberOfRegistersRepresentedBy(operand_type); |
| + } |
| + |
| + Register reg = Register::FromOperand(static_cast<int32_t>(operands[i])); |
| + if (Bytecodes::IsRegisterInputOperandType(operand_type)) { |
| + if (count == 1) { |
| + Register equivalent = PrepareRegisterInputOperand(reg); |
|
rmcilroy
2016/05/26 10:24:53
PrepareRegisterInputOperand does something differe
oth
2016/05/26 21:26:50
Done.
|
| + operands[i] = static_cast<uint32_t>(equivalent.ToOperand()); |
| + // Update operand scale as equivalent may be different. |
| + OperandScale operand_scale = |
| + Bytecodes::OperandSizesToScale(equivalent.SizeOfOperand()); |
| + if (operand_scale > node->operand_scale()) { |
| + node->set_operand_scale(operand_scale); |
| + } |
| + } else if (count > 1) { |
| + PrepareRegisterRangeInputOperand(reg, count); |
| + } |
| + } else if (Bytecodes::IsRegisterOutputOperandType(operand_type)) { |
| + PrepareRegisterRangeOutputOperand(reg, count); |
| + } |
| + } |
| +} |
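
The kRegCount handling above is the repeated pattern flagged in the review; one plausible shape for the suggested helper, using only calls that already appear in this file (the helper itself is hypothetical, not part of the patch):

    // Hypothetical helper (not in the patch): for the register operand at
    // |operand_index|, return the number of consecutive registers it covers.
    // A following kRegCount operand supplies an explicit count; otherwise the
    // operand type itself implies the count.
    static int GetRegisterOperandRange(Bytecode bytecode,
                                       const uint32_t* operands,
                                       int operand_index) {
      const OperandType* operand_types = Bytecodes::GetOperandTypes(bytecode);
      OperandType operand_type = operand_types[operand_index];
      if (operand_types[operand_index + 1] == OperandType::kRegCount) {
        return static_cast<int>(operands[operand_index + 1]);
      }
      return Bytecodes::GetNumberOfRegistersRepresentedBy(operand_type);
    }
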
| + |
| +void BytecodeRegisterOptimizer::PrepareAccumulator(BytecodeNode* const node) { |
| + // Materialize the accumulator if it is read by the bytecode. The |
| + // accumulator is special and no other register can be materialized |
| + // in its place. |
| + if (Bytecodes::ReadsAccumulator(node->bytecode()) && |
| + !accumulator_info_->materialized()) { |
| + Materialize(accumulator_info_); |
| + } |
| + |
| + // Materialize an equivalent to the accumulator if it will be |
| + // clobbered when the bytecode is dispatched. |
| + if (Bytecodes::WritesAccumulator(node->bytecode())) { |
| + PrepareRegisterOutputOperand(accumulator_info_); |
| + } |
| +} |
| + |
| +void BytecodeRegisterOptimizer::PrepareOperands(BytecodeNode* const node) { |
| + PrepareAccumulator(node); |
| + PrepareRegisterOperands(node); |
| +} |
| + |
| +// static |
| +Register BytecodeRegisterOptimizer::GetRegisterInputOperand( |
| + int index, Bytecode bytecode, const uint32_t* operands, int operand_count) { |
| + DCHECK_LT(index, operand_count); |
| + DCHECK(Bytecodes::IsRegisterInputOperandType( |
| + Bytecodes::GetOperandType(bytecode, index))); |
| + return OperandToRegister(operands[index]); |
| +} |
| + |
| +// static |
| +Register BytecodeRegisterOptimizer::GetRegisterOutputOperand( |
| + int index, Bytecode bytecode, const uint32_t* operands, int operand_count) { |
| + DCHECK_LT(index, operand_count); |
| + DCHECK(Bytecodes::IsRegisterOutputOperandType( |
| + Bytecodes::GetOperandType(bytecode, index))); |
| + return OperandToRegister(operands[index]); |
| +} |
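
OperandToRegister() is presumably just the inverse of Register::ToOperand(); a one-line sketch matching the FromOperand() conversion used elsewhere in this file (definition assumed, expected in the header):

    // Hypothetical sketch of OperandToRegister(), matching the
    // Register::FromOperand() conversion used elsewhere in this file.
    static Register OperandToRegister(uint32_t operand) {
      return Register::FromOperand(static_cast<int32_t>(operand));
    }
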
| + |
| +BytecodeRegisterOptimizer::RegisterInfo* |
| +BytecodeRegisterOptimizer::GetRegisterInfo(Register reg) { |
| + size_t index = GetMapIndex(reg); |
| + return (index < register_map_.size()) ? register_map_[index] : nullptr; |
|
rmcilroy
2016/05/26 10:24:53
it looks like we always depend on this returning n
oth
2016/05/26 21:26:50
This can be called from TemporaryRegisterFreeEvent
rmcilroy
2016/05/27 09:18:58
OK sounds good, thanks.
|
| +} |
| + |
| +BytecodeRegisterOptimizer::RegisterInfo* |
| +BytecodeRegisterOptimizer::GetOrCreateRegisterInfo(Register reg) { |
| + size_t index = GetMapIndex(reg); |
| + return index < register_map_.size() ? register_map_[index] |
| + : NewRegisterInfo(reg); |
| +} |
| + |
| +BytecodeRegisterOptimizer::RegisterInfo* |
| +BytecodeRegisterOptimizer::NewRegisterInfo(Register reg) { |
| + size_t index = GetMapIndex(reg); |
| + DCHECK_GE(index, register_map_.size()); |
| + GrowRegisterMap(reg); |
| + return register_map_[index]; |
| +} |
| + |
| +void BytecodeRegisterOptimizer::GrowRegisterMap(Register reg) { |
| + DCHECK(RegisterIsTemporary(reg)); |
| + size_t index = GetMapIndex(reg); |
| + DCHECK_GE(index, register_map_.size()); |
| + size_t new_size = index + 1; |
| + size_t old_size = register_map_.size(); |
| + register_map_.resize(new_size); |
| + for (size_t i = old_size; i < new_size; ++i) { |
| + register_map_[i] = new (zone()) |
| + RegisterInfo(RegisterFromMapIndex(i), equivalence_id_++, false); |
| + } |
| +} |
| + |
| +void BytecodeRegisterOptimizer::TemporaryRegisterFreeEvent(Register reg) { |
| + RegisterInfo* info = GetRegisterInfo(reg); |
| + if (info != nullptr) { |
| + // If register is materialized and part of equivalence set, make |
| + // sure another member of the set holds the value before the |
| + // temporary register is removed. |
| + CreateMaterializedEquivalentIfRequired(info); |
| + info->RemoveFromEquivalenceSet(); |
| + info->set_materialized(false); |
| + info->set_equivalence_id(-1); |
| + } |
| +} |
| + |
| +} // namespace interpreter |
| +} // namespace internal |
| +} // namespace v8 |