OLD | NEW |
1 // Copyright 2016 the V8 project authors. All rights reserved. | 1 // Copyright 2016 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/interpreter/bytecode-register-optimizer.h" | 5 #include "src/interpreter/bytecode-register-optimizer.h" |
6 | 6 |
7 namespace v8 { | 7 namespace v8 { |
8 namespace internal { | 8 namespace internal { |
9 namespace interpreter { | 9 namespace interpreter { |
10 | 10 |
(...skipping 306 matching lines...)
317 | 317 |
318 void BytecodeRegisterOptimizer::OutputRegisterTransfer( | 318 void BytecodeRegisterOptimizer::OutputRegisterTransfer( |
319 RegisterInfo* input_info, RegisterInfo* output_info, | 319 RegisterInfo* input_info, RegisterInfo* output_info, |
320 const BytecodeSourceInfo& source_info) { | 320 const BytecodeSourceInfo& source_info) { |
321 Register input = input_info->register_value(); | 321 Register input = input_info->register_value(); |
322 Register output = output_info->register_value(); | 322 Register output = output_info->register_value(); |
323 DCHECK_NE(input.index(), output.index()); | 323 DCHECK_NE(input.index(), output.index()); |
324 | 324 |
325 if (input == accumulator_) { | 325 if (input == accumulator_) { |
326 uint32_t operand = static_cast<uint32_t>(output.ToOperand()); | 326 uint32_t operand = static_cast<uint32_t>(output.ToOperand()); |
327 OperandScale scale = Bytecodes::OperandSizesToScale(output.SizeOfOperand()); | 327 BytecodeNode node(Bytecode::kStar, operand); |
328 BytecodeNode node(Bytecode::kStar, operand, scale); | |
329 WriteToNextStage(&node, source_info); | 328 WriteToNextStage(&node, source_info); |
330 } else if (output == accumulator_) { | 329 } else if (output == accumulator_) { |
331 uint32_t operand = static_cast<uint32_t>(input.ToOperand()); | 330 uint32_t operand = static_cast<uint32_t>(input.ToOperand()); |
332 OperandScale scale = Bytecodes::OperandSizesToScale(input.SizeOfOperand()); | 331 BytecodeNode node(Bytecode::kLdar, operand); |
333 BytecodeNode node(Bytecode::kLdar, operand, scale); | |
334 WriteToNextStage(&node, source_info); | 332 WriteToNextStage(&node, source_info); |
335 } else { | 333 } else { |
336 uint32_t operand0 = static_cast<uint32_t>(input.ToOperand()); | 334 uint32_t operand0 = static_cast<uint32_t>(input.ToOperand()); |
337 uint32_t operand1 = static_cast<uint32_t>(output.ToOperand()); | 335 uint32_t operand1 = static_cast<uint32_t>(output.ToOperand()); |
338 OperandScale scale = Bytecodes::OperandSizesToScale(input.SizeOfOperand(), | 336 BytecodeNode node(Bytecode::kMov, operand0, operand1); |
339 output.SizeOfOperand()); | |
340 BytecodeNode node(Bytecode::kMov, operand0, operand1, scale); | |
341 WriteToNextStage(&node, source_info); | 337 WriteToNextStage(&node, source_info); |
342 } | 338 } |
343 output_info->set_materialized(true); | 339 output_info->set_materialized(true); |
344 } | 340 } |
345 | 341 |
346 void BytecodeRegisterOptimizer::CreateMaterializedEquivalent( | 342 void BytecodeRegisterOptimizer::CreateMaterializedEquivalent( |
347 RegisterInfo* info) { | 343 RegisterInfo* info) { |
348 DCHECK(info->materialized()); | 344 DCHECK(info->materialized()); |
349 RegisterInfo* unmaterialized = info->GetEquivalentToMaterialize(); | 345 RegisterInfo* unmaterialized = info->GetEquivalentToMaterialize(); |
350 if (unmaterialized) { | 346 if (unmaterialized) { |
(...skipping 125 matching lines...)
476 GetMaterializedEquivalentNotAccumulator(reg_info); | 472 GetMaterializedEquivalentNotAccumulator(reg_info); |
477 return equivalent_info->register_value(); | 473 return equivalent_info->register_value(); |
478 } | 474 } |
479 } | 475 } |
480 | 476 |
481 void BytecodeRegisterOptimizer::PrepareRegisterInputOperand( | 477 void BytecodeRegisterOptimizer::PrepareRegisterInputOperand( |
482 BytecodeNode* const node, Register reg, int operand_index) { | 478 BytecodeNode* const node, Register reg, int operand_index) { |
483 Register equivalent = GetEquivalentRegisterForInputOperand(reg); | 479 Register equivalent = GetEquivalentRegisterForInputOperand(reg); |
484 node->operands()[operand_index] = | 480 node->operands()[operand_index] = |
485 static_cast<uint32_t>(equivalent.ToOperand()); | 481 static_cast<uint32_t>(equivalent.ToOperand()); |
486 // Update operand scale as equivalent may be different. | |
487 OperandScale operand_scale = | |
488 Bytecodes::OperandSizesToScale(equivalent.SizeOfOperand()); | |
489 if (operand_scale > node->operand_scale()) { | |
490 node->set_operand_scale(operand_scale); | |
491 } | |
492 } | 482 } |
493 | 483 |
494 void BytecodeRegisterOptimizer::PrepareRegisterRangeInputOperand(Register start, | 484 void BytecodeRegisterOptimizer::PrepareRegisterRangeInputOperand(Register start, |
495 int count) { | 485 int count) { |
496 for (int i = 0; i < count; ++i) { | 486 for (int i = 0; i < count; ++i) { |
497 Register current(start.index() + i); | 487 Register current(start.index() + i); |
498 RegisterInfo* input_info = GetRegisterInfo(current); | 488 RegisterInfo* input_info = GetRegisterInfo(current); |
499 Materialize(input_info); | 489 Materialize(input_info); |
500 } | 490 } |
501 } | 491 } |
(...skipping 125 matching lines...)
627 if (info->materialized()) { | 617 if (info->materialized()) { |
628 CreateMaterializedEquivalent(info); | 618 CreateMaterializedEquivalent(info); |
629 } | 619 } |
630 info->MoveToNewEquivalenceSet(kInvalidEquivalenceId, false); | 620 info->MoveToNewEquivalenceSet(kInvalidEquivalenceId, false); |
631 } | 621 } |
632 } | 622 } |
633 | 623 |
634 } // namespace interpreter | 624 } // namespace interpreter |
635 } // namespace internal | 625 } // namespace internal |
636 } // namespace v8 | 626 } // namespace v8 |
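
Note: the NEW side drops the explicit OperandScale argument from each BytecodeNode constructor call (kStar, kLdar, kMov) and deletes the Bytecodes::OperandSizesToScale() bookkeeping in PrepareRegisterInputOperand(), so the register optimizer no longer recomputes a node's scale when it rewrites an operand. Below is a minimal standalone sketch of that pattern, assuming the scale is instead derived from the operand values themselves when the node is consumed; the ScaledNode class and its members are illustrative stand-ins, not the actual v8::internal::interpreter API.

// Illustrative only: a stand-in node that computes its own operand scale
// from operand magnitudes instead of taking it as a constructor argument.
#include <algorithm>
#include <cstdint>
#include <initializer_list>
#include <iostream>
#include <vector>

enum class OperandScale : uint8_t { kSingle = 1, kDouble = 2, kQuadruple = 4 };

class ScaledNode {
 public:
  explicit ScaledNode(std::initializer_list<uint32_t> operands)
      : operands_(operands) {}

  // Scale is derived on demand from the widest operand, so callers no longer
  // have to update it whenever an operand is rewritten (as the deleted code
  // in PrepareRegisterInputOperand used to do).
  OperandScale scale() const {
    OperandScale scale = OperandScale::kSingle;
    for (uint32_t operand : operands_) {
      scale = std::max(scale, ScaleFor(operand));
    }
    return scale;
  }

  std::vector<uint32_t>& operands() { return operands_; }

 private:
  static OperandScale ScaleFor(uint32_t operand) {
    if (operand <= 0xFF) return OperandScale::kSingle;
    if (operand <= 0xFFFF) return OperandScale::kDouble;
    return OperandScale::kQuadruple;
  }

  std::vector<uint32_t> operands_;
};

int main() {
  ScaledNode node({0x12, 0x3456});
  // Rewriting an operand (as the register optimizer does for equivalent
  // registers) cannot leave a stale scale behind, because scale() always
  // reflects the current operand values.
  node.operands()[0] = 0x123456;
  std::cout << static_cast<int>(node.scale()) << "\n";  // prints 4
  return 0;
}

The OperandScale values kSingle/kDouble/kQuadruple (1/2/4) are used here only to keep the sketch recognizable; how the real BytecodeNode and array writer pick up the scale after this change is not shown in this hunk.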