| Index: src/interpreter/bytecode-register-optimizer.cc
|
| diff --git a/src/interpreter/bytecode-register-optimizer.cc b/src/interpreter/bytecode-register-optimizer.cc
|
| new file mode 100644
|
| index 0000000000000000000000000000000000000000..ba2812a02f413c3d2dc4fedf9c8714003d8797ee
|
| --- /dev/null
|
| +++ b/src/interpreter/bytecode-register-optimizer.cc
|
| @@ -0,0 +1,536 @@
|
| +// Copyright 2015 the V8 project authors. All rights reserved.
|
| +// Use of this source code is governed by a BSD-style license that can be
|
| +// found in the LICENSE file.
|
| +
|
| +#include "src/interpreter/bytecode-register-optimizer.h"
|
| +
|
| +namespace v8 {
|
| +namespace internal {
|
| +namespace interpreter {
|
| +
|
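| +// Per-register metadata: which equivalence set the register belongs
|
| +// to and whether its value has actually been written (materialized).
|
| +// An equivalence set is a circular doubly-linked list threaded
|
| +// through next_/prev_; a register alone in its set points at itself.
|
| +// For example, after Ldar r0; Star r5 (r5 a temporary), r0, r5 and
|
| +// the accumulator share one set but only r0 is materialized.
|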
| +class BytecodeRegisterOptimizer::RegisterInfo final : public ZoneObject {
|
| + public:
|
| + RegisterInfo(Register reg, int equivalence_id, bool materialized)
|
| + : register_(reg),
|
| + equivalence_id_(equivalence_id),
|
| + materialized_(materialized),
|
| + next_(this),
|
| + prev_(this) {}
|
| +
|
| + void AddToEquivalenceSetOf(RegisterInfo* info);
|
| + void RemoveFromEquivalenceSet();
|
| + bool IsOnlyEquivalent() const;
|
| + bool IsOnlyMaterializedEquivalent() const;
|
| + bool IsEquivalent(RegisterInfo* info) const;
|
| +
|
| + RegisterInfo* GetMaterializedEquivalent();
|
| + RegisterInfo* GetMaterializationCandidate();
|
| +
|
| + Register register_value() const { return register_; }
|
| + bool materialized() const { return materialized_; }
|
| + void set_materialized(bool materialized) { materialized_ = materialized; }
|
| + void set_equivalence_id(int equivalence_id) {
|
| + equivalence_id_ = equivalence_id;
|
| + }
|
| + int equivalence_id() const { return equivalence_id_; }
|
| + RegisterInfo* next() const { return next_; }
|
| +
|
| + private:
|
| + Register register_;
|
| + int equivalence_id_;
|
| + bool materialized_;
|
| +
|
| + // Equivalence set pointers.
|
| + RegisterInfo* next_;
|
| + RegisterInfo* prev_;
|
| +
|
| + DISALLOW_COPY_AND_ASSIGN(RegisterInfo);
|
| +};
|
| +
|
| +void BytecodeRegisterOptimizer::RegisterInfo::AddToEquivalenceSetOf(
|
| + RegisterInfo* info) {
|
| + DCHECK(!IsEquivalent(info) && IsOnlyEquivalent());
|
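| +  // Splice this register into the circular list immediately after
|
| +  // |info| and adopt that set's equivalence id.
|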
| + this->next_ = info->next_;
|
| + this->prev_ = info;
|
| + this->prev_->next_ = this;
|
| + this->next_->prev_ = this;
|
| + set_equivalence_id(info->equivalence_id());
|
| +}
|
| +
|
| +void BytecodeRegisterOptimizer::RegisterInfo::RemoveFromEquivalenceSet() {
|
| + this->next_->prev_ = this->prev_;
|
| + this->prev_->next_ = this->next_;
|
| + this->next_ = this->prev_ = this;
|
| +}
|
| +
|
| +bool BytecodeRegisterOptimizer::RegisterInfo::IsOnlyEquivalent() const {
|
| + return this->next_ == this;
|
| +}
|
| +
|
| +bool BytecodeRegisterOptimizer::RegisterInfo::IsOnlyMaterializedEquivalent()
|
| + const {
|
| + DCHECK(materialized());
|
| +
|
| + const RegisterInfo* visitor = this->next_;
|
| + while (visitor != this) {
|
| + if (visitor->materialized()) {
|
| + return false;
|
| + }
|
| + visitor = visitor->next_;
|
| + }
|
| + return true;
|
| +}
|
| +
|
| +bool BytecodeRegisterOptimizer::RegisterInfo::IsEquivalent(
|
| + RegisterInfo* info) const {
|
| + return equivalence_id() == info->equivalence_id();
|
| +}
|
| +
|
| +BytecodeRegisterOptimizer::RegisterInfo*
|
| +BytecodeRegisterOptimizer::RegisterInfo::GetMaterializedEquivalent() {
|
| + RegisterInfo* visitor = this;
|
| + do {
|
| + if (visitor->materialized()) {
|
| + return visitor;
|
| + }
|
| + visitor = visitor->next_;
|
| + } while (visitor != this);
|
| +
|
| + return nullptr;
|
| +}
|
| +
|
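| +// Returns the best member of this register's equivalence set to
|
| +// materialize next, preferring the lowest-numbered register, or
|
| +// nullptr if another member of the set is already materialized.
|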
| +BytecodeRegisterOptimizer::RegisterInfo*
|
| +BytecodeRegisterOptimizer::RegisterInfo::GetMaterializationCandidate() {
|
| + DCHECK(this->materialized());
|
| + RegisterInfo* visitor = this->next_;
|
| + RegisterInfo* best_info = nullptr;
|
| + while (visitor != this) {
|
| + if (visitor->materialized()) {
|
| + return nullptr;
|
| + }
|
| + if (best_info == nullptr ||
|
| + visitor->register_value() < best_info->register_value()) {
|
| + best_info = visitor;
|
| + }
|
| + visitor = visitor->next_;
|
| + }
|
| + return best_info;
|
| +}
|
| +
|
| +BytecodeRegisterOptimizer::BytecodeRegisterOptimizer(
|
| + Zone* zone, TemporaryRegisterAllocator* register_allocator,
|
| + int parameter_count, int fixed_register_count,
|
| + BytecodePipelineStage* next_stage)
|
| + : accumulator_(Register::bytecode_array()),
|
| + temporary_base_(register_allocator->allocation_base()),
|
| + register_map_(zone),
|
| + equivalence_id_(0),
|
| + next_stage_(next_stage),
|
| + flushed_(false),
|
| + zone_(zone) {
|
| + register_allocator->set_observer(this);
|
| +
|
| +  // Calculate the offset so register index values can be mapped into
|
| + // a vector of register metadata.
|
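| +  // Parameters are addressed with negative register indices,
|
| +  // parameter 0 being the most negative, so the offset maps the
|
| +  // contiguous range from parameter 0 through the last fixed
|
| +  // register onto indices [0, register_map_.size()).
|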
| + if (parameter_count != 0) {
|
| + map_index_offset_ =
|
| + -Register::FromParameterIndex(0, parameter_count).index();
|
| + } else {
|
| + map_index_offset_ = -accumulator_.index();
|
| + }
|
| +
|
| + register_map_.resize(map_index_offset_ +
|
| + static_cast<size_t>(fixed_register_count));
|
| +
|
| + for (size_t i = 0; i < register_map_.size(); ++i) {
|
| + register_map_[i] = new (zone)
|
| + RegisterInfo(RegisterFromMapIndex(i), equivalence_id_++, true);
|
| + DCHECK_EQ(register_map_[i]->register_value().index(),
|
| + RegisterFromMapIndex(i).index());
|
| + }
|
| + accumulator_info_ = GetRegisterInfo(accumulator_);
|
| + DCHECK(accumulator_info_->register_value() == accumulator_ &&
|
| + accumulator_info_->materialized());
|
| +}
|
| +
|
| +// override
|
| +void BytecodeRegisterOptimizer::FlushBasicBlock() {
|
| + next_stage_->FlushBasicBlock();
|
| +}
|
| +
|
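| +// Materializes all pending virtual register transfers and then
|
| +// dissolves every equivalence set, so that subsequent bytecodes see
|
| +// registers in the state implied by the bytecode already emitted.
|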
| +void BytecodeRegisterOptimizer::FlushState() {
|
| + size_t count = register_map_.size();
|
| + for (size_t i = 0; i < count; ++i) {
|
| + RegisterInfo* reg_info = register_map_[i];
|
| + if (!reg_info->materialized() && !reg_info->IsOnlyEquivalent()) {
|
| + DCHECK(RegisterIsTemporary(reg_info->register_value()) ||
|
| + reg_info->register_value() == accumulator_);
|
| + Materialize(reg_info);
|
| + }
|
| + }
|
| +
|
| + for (size_t i = 0; i < count; ++i) {
|
| + RegisterInfo* reg_info = register_map_[i];
|
| + if (!reg_info->IsOnlyEquivalent()) {
|
| + reg_info->RemoveFromEquivalenceSet();
|
| + reg_info->set_equivalence_id(equivalence_id_++);
|
| + }
|
| + }
|
| +
|
| +  flushed_ = true;
|
| +}
|
| +
|
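| +// Flushes any pending register state so the offset reported by the
|
| +// next stage reflects all bytecode that will have been emitted.
|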
| +// override
|
| +size_t BytecodeRegisterOptimizer::FlushForOffset() {
|
| + if (!flushed_) {
|
| + FlushState();
|
| + }
|
| + return next_stage_->FlushForOffset();
|
| +}
|
| +
|
| +void BytecodeRegisterOptimizer::EmitNop(BytecodeNode* node) {
|
| + BytecodeNode nop(Bytecode::kNop);
|
| + nop.source_info().Update(node->source_info());
|
| + WriteToNextStage(&nop);
|
| + node->source_info().set_invalid();
|
| +}
|
| +
|
| +// override
|
| +void BytecodeRegisterOptimizer::Write(BytecodeNode* node) {
|
| + flushed_ = false;
|
| +
|
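| +  // Ldar, Star and Mov only shuffle values between registers, so
|
| +  // they are not forwarded directly. The transfer is recorded in the
|
| +  // equivalence sets and a Nop carrying the source position is
|
| +  // emitted instead. As a sketch, for a temporary r5 the sequence
|
| +  //   Ldar r0 ; Star r5 ; Ldar r5 ; Add r1
|
| +  // effectively reduces to
|
| +  //   Ldar r0 ; Add r1
|
| +  // because r5's value is never observably used.
|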
| + switch (node->bytecode()) {
|
| + case Bytecode::kLdar: {
|
| + DoLdar(node);
|
| + EmitNop(node);
|
| + return;
|
| + }
|
| + case Bytecode::kStar: {
|
| + EmitNop(node);
|
| + DoStar(node);
|
| + return;
|
| + }
|
| + case Bytecode::kMov: {
|
| + EmitNop(node);
|
| + DoMov(node);
|
| + return;
|
| + }
|
| + case Bytecode::kReturn: {
|
| +      // The accumulator holds the return value, so it needs to be
|
| +      // materialized before the return.
|
| + if (accumulator_info_->materialized() &&
|
| + !accumulator_info_->IsOnlyMaterializedEquivalent()) {
|
| + accumulator_info_->set_materialized(false);
|
| + }
|
| + Materialize(accumulator_info_);
|
| + WriteToNextStage(node);
|
| + return;
|
| + }
|
| + default:
|
| + break;
|
| + }
|
| +
|
| + if (Bytecodes::IsJump(node->bytecode()) ||
|
| + node->bytecode() == Bytecode::kDebugger) {
|
| +    // The debugger can manipulate locals and parameters, so flush
|
| + // everything before handing over to it. Similarly, all
|
| + // state must be flushed before emitting a jump.
|
| + FlushState();
|
| + WriteToNextStage(node);
|
| + return;
|
| + }
|
| +
|
| + PrepareOperands(node);
|
| + WriteToNextStage(node);
|
| +}
|
| +
|
| +void BytecodeRegisterOptimizer::WriteToNextStage(BytecodeNode* node) {
|
| + next_stage_->Write(node);
|
| +}
|
| +
|
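| +// Emits the bytecode that performs an actual register-to-register
|
| +// copy: Star when the source is the accumulator, Ldar when the
|
| +// destination is the accumulator, and Mov otherwise.
|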
| +void BytecodeRegisterOptimizer::OutputRegisterTransfer(
|
| + RegisterInfo* input_info, RegisterInfo* output_info) {
|
| + Register input = input_info->register_value();
|
| + Register output = output_info->register_value();
|
| + DCHECK_NE(input.index(), output.index());
|
| +
|
| + if (input == accumulator_) {
|
| + uint32_t operand = static_cast<uint32_t>(output.ToOperand());
|
| + OperandScale scale = Bytecodes::OperandSizesToScale(output.SizeOfOperand());
|
| + BytecodeNode node(Bytecode::kStar, operand, scale);
|
| + WriteToNextStage(&node);
|
| + } else if (output == accumulator_) {
|
| + uint32_t operand = static_cast<uint32_t>(input.ToOperand());
|
| + OperandScale scale = Bytecodes::OperandSizesToScale(input.SizeOfOperand());
|
| + BytecodeNode node(Bytecode::kLdar, operand, scale);
|
| + WriteToNextStage(&node);
|
| + } else {
|
| + uint32_t operand0 = static_cast<uint32_t>(input.ToOperand());
|
| + uint32_t operand1 = static_cast<uint32_t>(output.ToOperand());
|
| + OperandScale scale = Bytecodes::OperandSizesToScale(input.SizeOfOperand(),
|
| + output.SizeOfOperand());
|
| + BytecodeNode node(Bytecode::kMov, operand0, operand1, scale);
|
| + WriteToNextStage(&node);
|
| + }
|
| +}
|
| +
|
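| +// If |info| holds the only materialized copy of a value that other
|
| +// members of its equivalence set still depend on, copies the value
|
| +// into the best unmaterialized member so it survives |info| being
|
| +// clobbered or freed.
|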
| +void BytecodeRegisterOptimizer::CreateMaterializedEquivalentIfRequired(
|
| + RegisterInfo* info) {
|
| + if (info->materialized()) {
|
| + RegisterInfo* unmaterialized = info->GetMaterializationCandidate();
|
| + if (unmaterialized) {
|
| + OutputRegisterTransfer(info, unmaterialized);
|
| + unmaterialized->set_materialized(true);
|
| + }
|
| + }
|
| +}
|
| +
|
| +BytecodeRegisterOptimizer::RegisterInfo*
|
| +BytecodeRegisterOptimizer::GetMaterializedEquivalent(RegisterInfo* info) {
|
| + return info->materialized() ? info : info->GetMaterializedEquivalent();
|
| +}
|
| +
|
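| +// Returns a materialized register other than the accumulator that
|
| +// holds the same value as |info|, materializing |info| itself when
|
| +// the accumulator is the only materialized equivalent.
|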
| +BytecodeRegisterOptimizer::RegisterInfo*
|
| +BytecodeRegisterOptimizer::GetMaterializedEquivalentNotAccumulator(
|
| + RegisterInfo* info) {
|
| + if (info->materialized()) {
|
| + return info;
|
| + }
|
| +
|
| +  RegisterInfo* result = info->GetMaterializedEquivalent();
|
| +  DCHECK_NOT_NULL(result);
|
| + if (result->register_value() == accumulator_) {
|
| + result = result->next()->GetMaterializedEquivalent();
|
| + if (result->register_value() == accumulator_) {
|
| + Materialize(info);
|
| + result = info;
|
| + }
|
| + }
|
| + return result;
|
| +}
|
| +
|
| +void BytecodeRegisterOptimizer::Materialize(RegisterInfo* info) {
|
| + if (!info->materialized()) {
|
| + RegisterInfo* materialized = info->GetMaterializedEquivalent();
|
| + OutputRegisterTransfer(materialized, info);
|
| + info->set_materialized(true);
|
| + }
|
| +}
|
| +
|
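| +// Records that |output_info| now holds the same value as
|
| +// |input_info|. A move bytecode is only emitted when the output
|
| +// register is observable by the user or debugger; transfers into
|
| +// temporaries and the accumulator are just recorded in the
|
| +// equivalence sets.
|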
| +void BytecodeRegisterOptimizer::RegisterTransfer(RegisterInfo* input_info,
|
| + RegisterInfo* output_info) {
|
| +  // Before the output register is overwritten, make sure its current
|
| +  // value is materialized into a different register if other members
|
| +  // of its equivalence set still depend on it.
|
| + CreateMaterializedEquivalentIfRequired(output_info);
|
| + bool output_is_observable =
|
| + RegisterIsObservable(output_info->register_value());
|
| +
|
| + // If the output and input are not equivalent or the output is
|
| + // visible to the user/debugger, update equivalence set and
|
| + // potentially emit store.
|
| + if (!output_info->IsEquivalent(input_info) || output_is_observable) {
|
| + // Remove from current set and invalidate equivalence id.
|
| + output_info->RemoveFromEquivalenceSet();
|
| + output_info->set_equivalence_id(equivalence_id_++);
|
| +
|
| + // Place in new equivalence set.
|
| + RegisterInfo* materialized_info = input_info->GetMaterializedEquivalent();
|
| + output_info->AddToEquivalenceSetOf(materialized_info);
|
| + output_info->set_materialized(output_is_observable);
|
| + if (output_is_observable) {
|
| + OutputRegisterTransfer(materialized_info, output_info);
|
| + }
|
| + }
|
| +}
|
| +
|
| +void BytecodeRegisterOptimizer::DoLdar(const BytecodeNode* const node) {
|
| + Register input = GetRegisterInputOperand(
|
| + 0, node->bytecode(), node->operands(), node->operand_count());
|
| + RegisterInfo* input_info = GetRegisterInfo(input);
|
| + RegisterTransfer(input_info, accumulator_info_);
|
| +}
|
| +
|
| +void BytecodeRegisterOptimizer::DoMov(const BytecodeNode* const node) {
|
| + Register input = GetRegisterInputOperand(
|
| + 0, node->bytecode(), node->operands(), node->operand_count());
|
| + RegisterInfo* input_info = GetRegisterInfo(input);
|
| + Register output = GetRegisterOutputOperand(
|
| + 1, node->bytecode(), node->operands(), node->operand_count());
|
| + RegisterInfo* output_info = GetOrCreateRegisterInfo(output);
|
| + RegisterTransfer(input_info, output_info);
|
| +}
|
| +
|
| +void BytecodeRegisterOptimizer::DoStar(const BytecodeNode* const node) {
|
| + Register output = GetRegisterOutputOperand(
|
| + 0, node->bytecode(), node->operands(), node->operand_count());
|
| + RegisterInfo* output_info = GetOrCreateRegisterInfo(output);
|
| + RegisterTransfer(accumulator_info_, output_info);
|
| +}
|
| +
|
| +void BytecodeRegisterOptimizer::PrepareForClobber(RegisterInfo* reg_info) {
|
| + CreateMaterializedEquivalentIfRequired(reg_info);
|
| +
|
| +  // Put the register in its own equivalence set.
|
| + reg_info->RemoveFromEquivalenceSet();
|
| + reg_info->set_materialized(true);
|
| + reg_info->set_equivalence_id(equivalence_id_++);
|
| +}
|
| +
|
| +void BytecodeRegisterOptimizer::PrepareForClobberRange(Register start,
|
| + int count) {
|
| + for (int i = 0; i < count; ++i) {
|
| + Register reg(start.index() + i);
|
| + RegisterInfo* reg_info = GetOrCreateRegisterInfo(reg);
|
| + PrepareForClobber(reg_info);
|
| + }
|
| +}
|
| +
|
| +Register BytecodeRegisterOptimizer::PrepareRegisterInputOperand(Register reg) {
|
| + RegisterInfo* reg_info = GetOrCreateRegisterInfo(reg);
|
| + if (reg_info->materialized()) {
|
| + return reg;
|
| + } else {
|
| + RegisterInfo* equivalent_info =
|
| + GetMaterializedEquivalentNotAccumulator(reg_info);
|
| + return equivalent_info->register_value();
|
| + }
|
| +}
|
| +
|
| +void BytecodeRegisterOptimizer::PrepareRegisterRangeInputOperand(Register start,
|
| + int count) {
|
| + for (int i = 0; i < count; ++i) {
|
| + Register current(start.index() + i);
|
| + RegisterInfo* input_info = GetRegisterInfo(current);
|
| + Materialize(input_info);
|
| + }
|
| +}
|
| +
|
| +void BytecodeRegisterOptimizer::PrepareOperands(BytecodeNode* node) {
|
| + if (Bytecodes::ReadsAccumulator(node->bytecode()) &&
|
| + !accumulator_info_->materialized()) {
|
| + Materialize(accumulator_info_);
|
| + }
|
| +
|
| + if (Bytecodes::WritesAccumulator(node->bytecode())) {
|
| + PrepareForClobber(accumulator_info_);
|
| + }
|
| +
|
| +  // For each single-register input, substitute a materialized
|
| +  // equivalent. For inputs that form a register list longer than
|
| +  // one, materialize every register in the list in place. Update
|
| +  // the operand scale if a substituted register needs a wider
|
| +  // operand.
|
| +  //
|
| +  // For each output register about to be clobbered, materialize an
|
| +  // equivalent if one is needed, and put the register in its own
|
| +  // equivalence set.
|
| + int register_operand_bitmap =
|
| + Bytecodes::GetRegisterOperandBitmap(node->bytecode());
|
| + const OperandType* operand_types =
|
| + Bytecodes::GetOperandTypes(node->bytecode());
|
| + uint32_t* operands = node->operands();
|
| + for (int i = 0; register_operand_bitmap != 0;
|
| + ++i, register_operand_bitmap >>= 1) {
|
| + if ((register_operand_bitmap & 1) == 0) {
|
| + continue;
|
| + }
|
| + OperandType operand_type = operand_types[i];
|
| + int count = 0;
|
| + if (operand_types[i + 1] == OperandType::kRegCount) {
|
| + count = static_cast<int>(operands[i + 1]);
|
| + if (count == 0) {
|
| + continue;
|
| + }
|
| + } else {
|
| + count = Bytecodes::GetNumberOfRegistersRepresentedBy(operand_type);
|
| + }
|
| +
|
| + Register reg = Register::FromOperand(static_cast<int32_t>(operands[i]));
|
| + if (Bytecodes::IsRegisterInputOperandType(operand_type)) {
|
| + if (count == 1) {
|
| + Register equivalent = PrepareRegisterInputOperand(reg);
|
| + operands[i] = static_cast<uint32_t>(equivalent.ToOperand());
|
| + // Update operand scale as equivalent may be different.
|
| + OperandScale operand_scale =
|
| + Bytecodes::OperandSizesToScale(equivalent.SizeOfOperand());
|
| + if (operand_scale > node->operand_scale()) {
|
| + node->set_operand_scale(operand_scale);
|
| + }
|
| + } else if (count > 1) {
|
| + PrepareRegisterRangeInputOperand(reg, count);
|
| + }
|
| + } else if (Bytecodes::IsRegisterOutputOperandType(operand_type)) {
|
| + PrepareForClobberRange(reg, count);
|
| + }
|
| + }
|
| +}
|
| +
|
| +// static
|
| +Register BytecodeRegisterOptimizer::OperandToRegister(uint32_t operand) {
|
| + return Register::FromOperand(static_cast<int32_t>(operand));
|
| +}
|
| +
|
| +// static
|
| +Register BytecodeRegisterOptimizer::GetRegisterInputOperand(
|
| + int index, Bytecode bytecode, const uint32_t* operands, int operand_count) {
|
| + DCHECK_LT(index, operand_count);
|
| + DCHECK(Bytecodes::IsRegisterInputOperandType(
|
| + Bytecodes::GetOperandType(bytecode, index)));
|
| + return OperandToRegister(operands[index]);
|
| +}
|
| +
|
| +// static
|
| +Register BytecodeRegisterOptimizer::GetRegisterOutputOperand(
|
| + int index, Bytecode bytecode, const uint32_t* operands, int operand_count) {
|
| + DCHECK_LT(index, operand_count);
|
| + DCHECK(Bytecodes::IsRegisterOutputOperandType(
|
| + Bytecodes::GetOperandType(bytecode, index)));
|
| + return OperandToRegister(operands[index]);
|
| +}
|
| +
|
| +BytecodeRegisterOptimizer::RegisterInfo*
|
| +BytecodeRegisterOptimizer::GetRegisterInfo(Register reg) {
|
| + size_t index = GetMapIndex(reg);
|
| + return (index < register_map_.size()) ? register_map_[index] : nullptr;
|
| +}
|
| +
|
| +BytecodeRegisterOptimizer::RegisterInfo*
|
| +BytecodeRegisterOptimizer::GetOrCreateRegisterInfo(Register reg) {
|
| + size_t index = GetMapIndex(reg);
|
| + return index < register_map_.size() ? register_map_[index]
|
| + : NewRegisterInfo(reg);
|
| +}
|
| +
|
| +BytecodeRegisterOptimizer::RegisterInfo*
|
| +BytecodeRegisterOptimizer::NewRegisterInfo(Register reg) {
|
| + size_t index = GetMapIndex(reg);
|
| + DCHECK_GE(index, register_map_.size());
|
| + GrowRegisterMap(reg);
|
| + return register_map_[index];
|
| +}
|
| +
|
| +void BytecodeRegisterOptimizer::GrowRegisterMap(Register reg) {
|
| + DCHECK(RegisterIsTemporary(reg));
|
| + size_t index = GetMapIndex(reg);
|
| + DCHECK_GE(index, register_map_.size());
|
| + size_t new_size = index + 1;
|
| + size_t old_size = register_map_.size();
|
| + register_map_.resize(new_size);
|
| + for (size_t i = old_size; i < new_size; ++i) {
|
| + register_map_[i] = new (zone())
|
| + RegisterInfo(RegisterFromMapIndex(i), equivalence_id_++, false);
|
| + }
|
| +}
|
| +
|
| +void BytecodeRegisterOptimizer::TemporaryRegisterFreeEvent(Register reg) {
|
| + RegisterInfo* info = GetRegisterInfo(reg);
|
| +
|
| + if (info != nullptr) {
|
| +    // If the register is materialized and part of an equivalence
|
| +    // set, make sure another member of the set holds the value
|
| +    // before the temporary register is culled.
|
| + CreateMaterializedEquivalentIfRequired(info);
|
| + info->RemoveFromEquivalenceSet();
|
| + info->set_materialized(false);
|
| + info->set_equivalence_id(-1);
|
| + }
|
| +}
|
| +
|
| +} // namespace interpreter
|
| +} // namespace internal
|
| +} // namespace v8
|