Index: src/arm64/lithium-gap-resolver-arm64.cc
diff --git a/src/arm64/lithium-gap-resolver-arm64.cc b/src/arm64/lithium-gap-resolver-arm64.cc
index c721cb48a8cdf4150be08ad715d6ba73ef76e14f..1a5ff8c3cda43eed802f07d4e575b7629f11ccae 100644
--- a/src/arm64/lithium-gap-resolver-arm64.cc
+++ b/src/arm64/lithium-gap-resolver-arm64.cc
@@ -41,22 +41,23 @@ void LGapResolver::Resolve(LParallelMove* parallel_move) {
   for (int i = 0; i < moves_.length(); ++i) {
     LMoveOperands move = moves_[i];
 
-    // Skip constants to perform them last. They don't block other moves
-    // and skipping such moves with register destinations keeps those
-    // registers free for the whole algorithm.
-    if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
+    // Skip constants and value regenerations to perform them last. They don't
+    // block other moves and skipping such moves with register destinations
+    // keeps those registers free for the whole algorithm.
+    if (!move.IsEliminated() &&
+        !move.UsesRegeneration() && !move.source()->IsConstantOperand()) {
       root_index_ = i; // Any cycle is found when we reach this move again.
       PerformMove(i);
       if (in_cycle_) RestoreValue();
     }
   }
 
-  // Perform the moves with constant sources.
+  // Perform the moves with constant sources or those regenerating the result.
   for (int i = 0; i < moves_.length(); ++i) {
     LMoveOperands move = moves_[i];
 
     if (!move.IsEliminated()) {
-      ASSERT(move.source()->IsConstantOperand());
+      ASSERT(move.UsesRegeneration() || move.source()->IsConstantOperand());
       EmitMove(i);
     }
   }
@@ -216,6 +217,35 @@ void LGapResolver::EmitMove(int index) {
   LOperand* source = moves_[index].source();
   LOperand* destination = moves_[index].destination();
 
+  if (moves_[index].UsesRegeneration()) {
+    if (source->IsInRegister()) {
+      ASSERT(destination->IsInMemory());
+      if (source->parent_linstr()->hydrogen_value()->HasPhiUses()) {
+        // Phi insertion does not have access to the constant cache, so phis
+        // won't be replaced by regenerations. If we skip the spill operation,
+        // the phi will then load garbage from the stack.
+      } else {
+        // Skip the store to memory. All gaps loading this value will instead
+        // regenerate it.
+        moves_[index].Eliminate();
+        return;
+      }
+    } else {
+      ASSERT(source->IsInMemory() && destination->IsInRegister());
+      ASSERT(destination->parent_linstr()->HasResult());
+      LTemplateResultInstruction<1>* instr =
+          reinterpret_cast<LTemplateResultInstruction<1>*>(
+              destination->parent_linstr());
+      ASSERT(instr->PreferRegenerateToSpill());
+      LOperand* prev_result = instr->result();
+      instr->set_result(destination);
+      instr->CompileToNative(cgen_);
+      instr->set_result(prev_result);
+      moves_[index].Eliminate();
+      return;
+    }
+  }
+
   // Dispatch on the source and destination operand kinds. Not all
   // combinations are possible.
 
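Note (illustration only, not part of the patch): the standalone sketch below mimics the two-pass order the patched Resolve() uses, with invented names. Value, Move and ResolveMoves are not V8 types, and the lambda stands in for re-running the LInstruction that defined the value; it only shows why a regenerable value's spill can be skipped and its reload replaced by recomputation.

// Toy model of regenerate-instead-of-spill: ordinary moves are performed
// first, deferred moves are handled last by skipping the spill and
// recomputing the value at the destination instead of copying it.
#include <cstdio>
#include <functional>
#include <map>
#include <string>
#include <vector>

struct Value {
  bool prefer_regenerate;           // Analogue of PreferRegenerateToSpill().
  std::function<int()> regenerate;  // Recomputes the value (e.g. a constant).
};

struct Move {
  std::string src, dst;   // Locations such as "r0" or "stack:0".
  const Value* value;     // Non-null when the moved value can be regenerated.
};

static bool IsStackSlot(const std::string& loc) {
  return loc.rfind("stack:", 0) == 0;
}

void ResolveMoves(const std::vector<Move>& moves,
                  std::map<std::string, int>* state) {
  // Pass 1: moves that really have to copy data.
  for (const Move& m : moves) {
    if (m.value != nullptr && m.value->prefer_regenerate) continue;  // Defer.
    (*state)[m.dst] = (*state)[m.src];
    std::printf("copy  %s -> %s\n", m.src.c_str(), m.dst.c_str());
  }
  // Pass 2: deferred moves. A spill (register -> stack) is skipped entirely;
  // a fill (stack -> register) recomputes the value in the destination.
  for (const Move& m : moves) {
    if (m.value == nullptr || !m.value->prefer_regenerate) continue;
    if (IsStackSlot(m.dst)) {
      std::printf("skip  spill of %s (uses regenerate instead)\n",
                  m.src.c_str());
      continue;
    }
    (*state)[m.dst] = m.value->regenerate();
    std::printf("regen into %s (instead of loading %s)\n",
                m.dst.c_str(), m.src.c_str());
  }
}

int main() {
  Value forty_two{true, [] { return 42; }};
  std::map<std::string, int> state{{"r0", 42}, {"r1", 7}};
  // r0 holds a cheap-to-recompute value: its spill is skipped and the later
  // load from the stack slot regenerates it; the r1 move is copied normally.
  ResolveMoves({{"r0", "stack:0", &forty_two},
                {"r1", "r2", nullptr},
                {"stack:0", "r3", &forty_two}},
               &state);
  std::printf("r2=%d r3=%d\n", state["r2"], state["r3"]);
  return 0;
}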