Index: src/interpreter/bytecode-array-writer.cc
diff --git a/src/interpreter/bytecode-array-writer.cc b/src/interpreter/bytecode-array-writer.cc
index 93db4530165686b6f8cf9954f5267c5f1bdd9647..02948dc5af895c445f79eb775e933adb86ee71a3 100644
--- a/src/interpreter/bytecode-array-writer.cc
+++ b/src/interpreter/bytecode-array-writer.cc
@@ -21,27 +21,23 @@ BytecodeArrayWriter::BytecodeArrayWriter(
     Zone* zone, ConstantArrayBuilder* constant_array_builder,
     SourcePositionTableBuilder::RecordingMode source_position_mode)
     : bytecodes_(zone),
-      max_register_count_(0),
       unbound_jumps_(0),
       source_position_table_builder_(zone, source_position_mode),
-      constant_array_builder_(constant_array_builder) {}
+      constant_array_builder_(constant_array_builder) {
+  bytecodes_.reserve(512);  // Derived via experimentation.
+}
 
 // override
 BytecodeArrayWriter::~BytecodeArrayWriter() {}
 
 // override
 Handle<BytecodeArray> BytecodeArrayWriter::ToBytecodeArray(
-    Isolate* isolate, int fixed_register_count, int parameter_count,
+    Isolate* isolate, int register_count, int parameter_count,
     Handle<FixedArray> handler_table) {
   DCHECK_EQ(0, unbound_jumps_);
 
   int bytecode_size = static_cast<int>(bytecodes()->size());
-
-  // All locals need a frame slot for the debugger, but may not be
-  // present in generated code.
-  int frame_size_for_locals = fixed_register_count * kPointerSize;
-  int frame_size_used = max_register_count() * kPointerSize;
-  int frame_size = std::max(frame_size_for_locals, frame_size_used);
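+  // Assumes register_count already accounts for all locals as well as
+  // temporaries, so no separate debugger-driven frame-size floor is needed.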
+  int frame_size = register_count * kPointerSize;
   Handle<FixedArray> constant_pool =
       constant_array_builder()->ToFixedArray(isolate);
   Handle<BytecodeArray> bytecode_array = isolate->factory()->NewBytecodeArray(
@@ -104,116 +100,48 @@ void BytecodeArrayWriter::UpdateSourcePositionTable(
   }
 }
 
-namespace {
-
-OperandScale ScaleForScalableByteOperand(OperandSize operand_size) {
-  STATIC_ASSERT(static_cast<int>(OperandSize::kByte) ==
-                static_cast<int>(OperandScale::kSingle));
-  STATIC_ASSERT(static_cast<int>(OperandSize::kShort) ==
-                static_cast<int>(OperandScale::kDouble));
-  STATIC_ASSERT(static_cast<int>(OperandSize::kQuad) ==
-                static_cast<int>(OperandScale::kQuadruple));
-  return static_cast<OperandScale>(operand_size);
-}
-
-OperandScale OperandScaleForScalableSignedByte(uint32_t operand_value) {
-  int32_t signed_operand = static_cast<int32_t>(operand_value);
-  OperandSize bytes_required = Bytecodes::SizeForSignedOperand(signed_operand);
-  return ScaleForScalableByteOperand(bytes_required);
-}
-
-OperandScale OperandScaleForScalableUnsignedByte(uint32_t operand_value) {
-  OperandSize bytes_required = Bytecodes::SizeForUnsignedOperand(operand_value);
-  return ScaleForScalableByteOperand(bytes_required);
-}
-
-OperandScale GetOperandScale(const BytecodeNode* const node) {
-  const OperandTypeInfo* operand_type_infos =
-      Bytecodes::GetOperandTypeInfos(node->bytecode());
-  OperandScale operand_scale = OperandScale::kSingle;
-  int operand_count = node->operand_count();
-  for (int i = 0; i < operand_count; ++i) {
-    switch (operand_type_infos[i]) {
-      case OperandTypeInfo::kScalableSignedByte: {
-        uint32_t operand = node->operand(i);
-        operand_scale =
-            std::max(operand_scale, OperandScaleForScalableSignedByte(operand));
-        break;
-      }
-      case OperandTypeInfo::kScalableUnsignedByte: {
-        uint32_t operand = node->operand(i);
-        operand_scale = std::max(operand_scale,
-                                 OperandScaleForScalableUnsignedByte(operand));
-        break;
-      }
-      case OperandTypeInfo::kFixedUnsignedByte:
-      case OperandTypeInfo::kFixedUnsignedShort:
-        break;
-      case OperandTypeInfo::kNone:
-        UNREACHABLE();
-        break;
-    }
-  }
-  return operand_scale;
-}
-
-}  // namespace
-
 void BytecodeArrayWriter::EmitBytecode(const BytecodeNode* const node) {
   DCHECK_NE(node->bytecode(), Bytecode::kIllegal);
-  uint8_t buffer[kMaxSizeOfPackedBytecode];
-  uint8_t* buffer_limit = buffer;
+  Bytecode bytecode = node->bytecode();
+  OperandScale operand_scale = node->operand_scale();
 
-  OperandScale operand_scale = GetOperandScale(node);
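+  // A non-single scale means at least one operand is widened, so emit the
+  // corresponding wide/extra-wide prefix bytecode first.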
   if (operand_scale != OperandScale::kSingle) {
     Bytecode prefix = Bytecodes::OperandScaleToPrefixBytecode(operand_scale);
-    *buffer_limit++ = Bytecodes::ToByte(prefix);
+    bytecodes()->push_back(Bytecodes::ToByte(prefix));
   }
-
-  Bytecode bytecode = node->bytecode();
-  *buffer_limit++ = Bytecodes::ToByte(bytecode);
+  bytecodes()->push_back(Bytecodes::ToByte(bytecode));
 
   const uint32_t* const operands = node->operands();
-  const OperandType* operand_types = Bytecodes::GetOperandTypes(bytecode);
-  const int operand_count = Bytecodes::NumberOfOperands(bytecode);
+  const int operand_count = node->operand_count();
+  const OperandSize* operand_sizes =
+      Bytecodes::GetOperandSizes(bytecode, operand_scale);
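+  // Once the scale is known, every operand's width is fixed, so sizes can be
+  // looked up per bytecode rather than recomputed from each operand value.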
   for (int i = 0; i < operand_count; ++i) {
-    OperandSize operand_size =
-        Bytecodes::SizeOfOperand(operand_types[i], operand_scale);
-    switch (operand_size) {
+    switch (operand_sizes[i]) {
       case OperandSize::kNone:
         UNREACHABLE();
         break;
       case OperandSize::kByte:
-        *buffer_limit++ = static_cast<uint8_t>(operands[i]);
+        bytecodes()->push_back(static_cast<uint8_t>(operands[i]));
        break;
       case OperandSize::kShort: {
-        WriteUnalignedUInt16(buffer_limit, operands[i]);
-        buffer_limit += 2;
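+        // Append the operand's bytes in host byte order; the interpreter
+        // decodes them with matching unaligned native-order reads.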
+        const uint8_t* raw_operand =
+            reinterpret_cast<const uint8_t*>(&operands[i]);
+        bytecodes()->push_back(raw_operand[0]);
+        bytecodes()->push_back(raw_operand[1]);
         break;
       }
       case OperandSize::kQuad: {
-        WriteUnalignedUInt32(buffer_limit, operands[i]);
-        buffer_limit += 4;
+        const uint8_t* raw_operand =
+            reinterpret_cast<const uint8_t*>(&operands[i]);
+        bytecodes()->push_back(raw_operand[0]);
+        bytecodes()->push_back(raw_operand[1]);
+        bytecodes()->push_back(raw_operand[2]);
+        bytecodes()->push_back(raw_operand[3]);
         break;
       }
     }
-
-    int count = Bytecodes::GetNumberOfRegistersRepresentedBy(operand_types[i]);
-    if (count == 0) {
-      continue;
-    }
-    // NB operand_types is terminated by OperandType::kNone so
-    // operand_types[i + 1] is valid whilst i < operand_count.
-    if (operand_types[i + 1] == OperandType::kRegCount) {
-      count = static_cast<int>(operands[i]);
-    }
-    Register reg = Register::FromOperand(static_cast<int32_t>(operands[i]));
-    max_register_count_ = std::max(max_register_count_, reg.index() + count);
   }
-
-  DCHECK_LE(buffer_limit, buffer + sizeof(buffer));
-  bytecodes()->insert(bytecodes()->end(), buffer, buffer_limit);
 }
 // static
@@ -247,18 +175,17 @@ void BytecodeArrayWriter::PatchJumpWith8BitOperand(size_t jump_location,
   DCHECK(Bytecodes::IsJumpImmediate(jump_bytecode));
   size_t operand_location = jump_location + 1;
   DCHECK_EQ(bytecodes()->at(operand_location), k8BitJumpPlaceholder);
-  if (Bytecodes::SizeForSignedOperand(delta) == OperandSize::kByte) {
-    // The jump fits within the range of an Imm operand, so cancel
+  if (Bytecodes::ScaleForSignedOperand(delta) == OperandScale::kSingle) {
+    // The jump fits within the range of an Imm8 operand, so cancel
     // the reservation and jump directly.
     constant_array_builder()->DiscardReservedEntry(OperandSize::kByte);
     bytecodes()->at(operand_location) = static_cast<uint8_t>(delta);
   } else {
-    // The jump does not fit within the range of an Imm operand, so
+    // The jump does not fit within the range of an Imm8 operand, so
     // commit reservation putting the offset into the constant pool,
     // and update the jump instruction and operand.
     size_t entry = constant_array_builder()->CommitReservedEntry(
         OperandSize::kByte, Smi::FromInt(delta));
-    DCHECK_LE(entry, kMaxUInt32);
     DCHECK_EQ(Bytecodes::SizeForUnsignedOperand(static_cast<uint32_t>(entry)),
               OperandSize::kByte);
     jump_bytecode = GetJumpWithConstantOperand(jump_bytecode);
@@ -273,14 +200,21 @@ void BytecodeArrayWriter::PatchJumpWith16BitOperand(size_t jump_location,
   DCHECK(Bytecodes::IsJumpImmediate(jump_bytecode));
   size_t operand_location = jump_location + 1;
   uint8_t operand_bytes[2];
-  if (Bytecodes::SizeForSignedOperand(delta) <= OperandSize::kShort) {
+  if (Bytecodes::ScaleForSignedOperand(delta) <= OperandScale::kDouble) {
+    // The jump fits within the range of an Imm16 operand, so cancel
+    // the reservation and jump directly.
     constant_array_builder()->DiscardReservedEntry(OperandSize::kShort);
     WriteUnalignedUInt16(operand_bytes, static_cast<uint16_t>(delta));
   } else {
-    jump_bytecode = GetJumpWithConstantOperand(jump_bytecode);
-    bytecodes()->at(jump_location) = Bytecodes::ToByte(jump_bytecode);
+    // The jump does not fit within the range of an Imm16 operand, so
+    // commit reservation putting the offset into the constant pool,
+    // and update the jump instruction and operand.
     size_t entry = constant_array_builder()->CommitReservedEntry(
         OperandSize::kShort, Smi::FromInt(delta));
+    DCHECK_EQ(Bytecodes::SizeForUnsignedOperand(static_cast<uint32_t>(entry)),
+              OperandSize::kShort);
+    jump_bytecode = GetJumpWithConstantOperand(jump_bytecode);
+    bytecodes()->at(jump_location) = Bytecodes::ToByte(jump_bytecode);
     WriteUnalignedUInt16(operand_bytes, static_cast<uint16_t>(entry));
   }
   DCHECK(bytecodes()->at(operand_location) == k8BitJumpPlaceholder &&
@@ -351,8 +285,8 @@ void BytecodeArrayWriter::EmitJump(BytecodeNode* node, BytecodeLabel* label) {
     // Label has been bound already so this is a backwards jump.
     size_t abs_delta = current_offset - label->offset();
     int delta = -static_cast<int>(abs_delta);
-    OperandSize operand_size = Bytecodes::SizeForSignedOperand(delta);
-    if (operand_size > OperandSize::kByte) {
+    OperandScale operand_scale = Bytecodes::ScaleForSignedOperand(delta);
+    if (operand_scale > OperandScale::kSingle) {
       // Adjust for scaling byte prefix for wide jump offset.
       DCHECK_LE(delta, 0);
       delta -= 1;