Chromium Code Reviews| Index: src/lithium-allocator.cc |
| diff --git a/src/lithium-allocator.cc b/src/lithium-allocator.cc |
| index 7d09d179d2dce29a06715d7339f49d89f96b0b37..2d68859fe583b255320c51026a2544d3d4ed1a8c 100644 |
| --- a/src/lithium-allocator.cc |
| +++ b/src/lithium-allocator.cc |
| @@ -61,7 +61,6 @@ DEFINE_OPERAND_CACHE(LDoubleRegister, DOUBLE_REGISTER) |
| #undef DEFINE_OPERAND_CACHE |
| - |
| static inline LifetimePosition Min(LifetimePosition a, LifetimePosition b) { |
| return a.Value() < b.Value() ? a : b; |
| } |
| @@ -247,7 +246,7 @@ LOperand* LiveRange::CreateAssignedOperand() { |
| LOperand* op = NULL; |
| if (HasRegisterAssigned()) { |
| ASSERT(!IsSpilled()); |
| - if (assigned_double_) { |
| + if (assigned_register_class_ == DOUBLE_REGISTERS) { |
|
Kevin Millikin (Chromium)
2010/12/10 07:39:19
Could use your IsDouble() predicate here.
Vyacheslav Egorov (Chromium)
2010/12/10 14:18:27
Done.
|
| op = LDoubleRegister::Create(assigned_register()); |
| } else { |
| op = LRegister::Create(assigned_register()); |
| @@ -290,7 +289,7 @@ void LiveRange::AdvanceLastProcessedMarker( |
| void LiveRange::SplitAt(LifetimePosition position, LiveRange* result) { |
| - ASSERT(Start().Value() <= position.Value()); |
| + ASSERT(Start().Value() < position.Value()); |
| ASSERT(result->IsEmpty()); |
| // Find the last interval that ends before the position. If the |
| // position is contained in one of the intervals in the chain, we |
| @@ -625,7 +624,7 @@ LiveRange* LAllocator::FixedLiveRangeFor(int index) { |
| if (result == NULL) { |
| result = new LiveRange(FixedLiveRangeID(index)); |
| ASSERT(result->IsFixed()); |
| - result->set_assigned_register(index, false); |
| + result->set_assigned_register(index, CPU_REGISTERS); |
| fixed_live_ranges_[index] = result; |
| } |
| return result; |
| @@ -642,7 +641,7 @@ LiveRange* LAllocator::FixedDoubleLiveRangeFor(int index) { |
| if (result == NULL) { |
| result = new LiveRange(FixedDoubleLiveRangeID(index)); |
| ASSERT(result->IsFixed()); |
| - result->set_assigned_register(index, true); |
| + result->set_assigned_register(index, DOUBLE_REGISTERS); |
| fixed_double_live_ranges_[index] = result; |
| } |
| return result; |
| @@ -1258,14 +1257,6 @@ void LAllocator::BuildLiveRanges() { |
| } |
| -void LAllocator::AllocateGeneralRegisters() { |
| - HPhase phase("Allocate general registers", this); |
| - num_registers_ = Register::kNumAllocatableRegisters; |
| - mode_ = CPU_REGISTERS; |
| - AllocateRegisters(); |
| -} |
| - |
| - |
| bool LAllocator::SafePointsAreInOrder() const { |
| const ZoneList<LPointerMap*>* pointer_maps = chunk_->pointer_maps(); |
| int safe_point = 0; |
| @@ -1397,10 +1388,18 @@ void LAllocator::ProcessOsrEntry() { |
| } |
| +void LAllocator::AllocateGeneralRegisters() { |
| + HPhase phase("Allocate general registers", this); |
| + num_registers_ = Register::kNumAllocatableRegisters; |
| + mode_ = CPU_REGISTERS; |
| + AllocateRegisters(); |
| +} |
| + |
| + |
| void LAllocator::AllocateDoubleRegisters() { |
| HPhase phase("Allocate double registers", this); |
| num_registers_ = DoubleRegister::kNumAllocatableRegisters; |
| - mode_ = XMM_REGISTERS; |
| + mode_ = DOUBLE_REGISTERS; |
| AllocateRegisters(); |
| } |
| @@ -1411,7 +1410,7 @@ void LAllocator::AllocateRegisters() { |
| for (int i = 0; i < live_ranges_.length(); ++i) { |
| if (live_ranges_[i] != NULL) { |
| - if (HasDoubleValue(live_ranges_[i]->id()) == (mode_ == XMM_REGISTERS)) { |
| + if (RequiredRegisterClass(live_ranges_[i]->id()) == mode_) { |
| AddToUnhandledUnsorted(live_ranges_[i]); |
| } |
| } |
| @@ -1422,7 +1421,7 @@ void LAllocator::AllocateRegisters() { |
| ASSERT(active_live_ranges_.is_empty()); |
| ASSERT(inactive_live_ranges_.is_empty()); |
| - if (mode_ == XMM_REGISTERS) { |
| + if (mode_ == DOUBLE_REGISTERS) { |
| for (int i = 0; i < fixed_double_live_ranges_.length(); ++i) { |
| LiveRange* current = fixed_double_live_ranges_.at(i); |
| if (current != NULL) { |
| @@ -1463,11 +1462,7 @@ void LAllocator::AllocateRegisters() { |
| current->Start().NextInstruction().Value()) { |
| // Do not spill live range eagerly if use position that can benefit from |
| // the register is too close to the start of live range. |
| - LiveRange* part = Split(current, |
| - current->Start().NextInstruction(), |
| - pos->pos()); |
| - Spill(current); |
| - AddToUnhandledSorted(part); |
| + SpillBetween(current, current->Start(), pos->pos()); |
| ASSERT(UnhandledIsSorted()); |
| continue; |
| } |
| @@ -1544,10 +1539,12 @@ bool LAllocator::HasTaggedValue(int virtual_register) const { |
| } |
| -bool LAllocator::HasDoubleValue(int virtual_register) const { |
| +RegistersClass LAllocator::RequiredRegisterClass(int virtual_register) const { |
| HValue* value = graph()->LookupValue(virtual_register); |
| - if (value == NULL) return false; |
| - return value->representation().IsDouble(); |
| + if (value != NULL && value->representation().IsDouble()) { |
| + return DOUBLE_REGISTERS; |
| + } |
| + return CPU_REGISTERS; |
| } |
| @@ -1728,16 +1725,22 @@ void LAllocator::InactiveToActive(LiveRange* range) { |
| } |
| +// TryAllocateFreeReg and AllocateBlockedReg assume this |
| +// when allocating local arrays. |
| +STATIC_ASSERT(DoubleRegister::kNumAllocatableRegisters >= |
| + Register::kNumAllocatableRegisters); |
| + |
| + |
| bool LAllocator::TryAllocateFreeReg(LiveRange* current) { |
| - LifetimePosition max_pos = LifetimePosition::FromInstructionIndex( |
| - chunk_->instructions()->length() + 1); |
| - ASSERT(DoubleRegister::kNumAllocatableRegisters >= |
| - Register::kNumAllocatableRegisters); |
| - EmbeddedVector<LifetimePosition, DoubleRegister::kNumAllocatableRegisters> |
| - free_pos(max_pos); |
| + LifetimePosition free_until_pos[DoubleRegister::kNumAllocatableRegisters]; |
| + |
| + for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; i++) { |
| + free_until_pos[i] = LifetimePosition::MaxPosition(); |
| + } |
| + |
| for (int i = 0; i < active_live_ranges_.length(); ++i) { |
| LiveRange* cur_active = active_live_ranges_.at(i); |
| - free_pos[cur_active->assigned_register()] = |
| + free_until_pos[cur_active->assigned_register()] = |
| LifetimePosition::FromInstructionIndex(0); |
| } |
| @@ -1748,65 +1751,81 @@ bool LAllocator::TryAllocateFreeReg(LiveRange* current) { |
| cur_inactive->FirstIntersection(current); |
| if (!next_intersection.IsValid()) continue; |
| int cur_reg = cur_inactive->assigned_register(); |
| - free_pos[cur_reg] = Min(free_pos[cur_reg], next_intersection); |
| + free_until_pos[cur_reg] = Min(free_until_pos[cur_reg], next_intersection); |
| } |
| - UsePosition* pos = current->FirstPosWithHint(); |
| - if (pos != NULL) { |
| - LOperand* hint = pos->hint(); |
| + UsePosition* hinted_use = current->FirstPosWithHint(); |
| + if (hinted_use != NULL) { |
| + LOperand* hint = hinted_use->hint(); |
| if (hint->IsRegister() || hint->IsDoubleRegister()) { |
| int register_index = hint->index(); |
| - TraceAlloc("Found reg hint %d for live range %d (free [%d, end %d[)\n", |
| - register_index, |
| - current->id(), |
| - free_pos[register_index].Value(), |
| - current->End().Value()); |
| - if (free_pos[register_index].Value() >= current->End().Value()) { |
| + TraceAlloc( |
| + "Found reg hint %d (free until [%d) for live range %d (end %d[).\n", |
| + register_index, |
|
Kevin Millikin (Chromium)
2010/12/10 07:39:19
Maybe it's not helpful here, but there's probably
Vyacheslav Egorov (Chromium)
2010/12/10 14:18:27
Yes, I know. I was just lazy to introduce helper (
|
| + free_until_pos[register_index].Value(), |
| + current->id(), |
| + current->End().Value()); |
| + |
| + // The desired register is free until the end of the current live range. |
|
Kevin Millikin (Chromium)
2010/12/10 07:39:19
The desired....until the end...
Vyacheslav Egorov (Chromium)
2010/12/10 14:18:27
Done.
|
| + if (free_until_pos[register_index].Value() >= current->End().Value()) { |
| TraceAlloc("Assigning preferred reg %d to live range %d\n", |
| register_index, |
| current->id()); |
| - current->set_assigned_register(register_index, mode_ == XMM_REGISTERS); |
| + current->set_assigned_register(register_index, mode_); |
| return true; |
| } |
| } |
| } |
| - int max_reg = 0; |
| + // Find the register that stays free for the longest time. |
|
Kevin Millikin (Chromium)
2010/12/10 07:39:19
Find the register
Vyacheslav Egorov (Chromium)
2010/12/10 14:18:27
Done.
|
| + int reg = 0; |
| for (int i = 1; i < RegisterCount(); ++i) { |
| - if (free_pos[i].Value() > free_pos[max_reg].Value()) { |
| - max_reg = i; |
| + if (free_until_pos[i].Value() > free_until_pos[reg].Value()) { |
| + reg = i; |
| } |
| } |
| - if (free_pos[max_reg].InstructionIndex() == 0) { |
| + LifetimePosition pos = free_until_pos[reg]; |
| + |
| + if (pos.Value() <= current->Start().Value()) { |
| + // All registers are blocked. |
| return false; |
| - } else if (free_pos[max_reg].Value() >= current->End().Value()) { |
| - TraceAlloc("Assigning reg %d to live range %d\n", max_reg, current->id()); |
| - current->set_assigned_register(max_reg, mode_ == XMM_REGISTERS); |
| - } else { |
| - // Split the interval before first use position of max_reg and never split |
| - // it interval at its start position. |
| - LifetimePosition pos = free_pos[max_reg]; |
| - if (pos.Value() <= current->Start().Value()) return false; |
| - LiveRange* second_range = Split(current, pos); |
| - AddToUnhandledSorted(second_range); |
| - current->set_assigned_register(max_reg, mode_ == XMM_REGISTERS); |
| } |
| + if (pos.Value() < current->End().Value()) { |
| + // Register reg is available at the range start but becomes blocked before |
| + // the range end. Split current at position where it becomes blocked. |
| + LiveRange* tail = SplitAt(current, pos); |
| + AddToUnhandledSorted(tail); |
| + } |
| + |
| + |
| + // Register reg is available at the range start and is free until |
| + // the range end. |
| + ASSERT(pos.Value() >= current->End().Value()); |
| + TraceAlloc("Assigning reg %d to live range %d\n", reg, current->id()); |
| + current->set_assigned_register(reg, mode_); |
| + |
| return true; |
| } |
| void LAllocator::AllocateBlockedReg(LiveRange* current) { |
| - LifetimePosition max_pos = |
| - LifetimePosition::FromInstructionIndex( |
| - chunk_->instructions()->length() + 1); |
| - ASSERT(DoubleRegister::kNumAllocatableRegisters >= |
| - Register::kNumAllocatableRegisters); |
| - EmbeddedVector<LifetimePosition, DoubleRegister::kNumAllocatableRegisters> |
| - use_pos(max_pos); |
| - EmbeddedVector<LifetimePosition, DoubleRegister::kNumAllocatableRegisters> |
| - block_pos(max_pos); |
| + UsePosition* register_use = current->NextRegisterPosition(current->Start()); |
| + if (register_use == NULL) { |
| + // There is no use in the current live range that requires a register. |
|
Kevin Millikin (Chromium)
2010/12/10 07:39:19
the current live range that requires a register
Vyacheslav Egorov (Chromium)
2010/12/10 14:18:27
Done.
|
| + // We can just spill it. |
| + Spill(current); |
| + return; |
| + } |
| + |
| + |
| + LifetimePosition use_pos[DoubleRegister::kNumAllocatableRegisters]; |
| + LifetimePosition block_pos[DoubleRegister::kNumAllocatableRegisters]; |
| + |
| + for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; i++) { |
| + use_pos[i] = block_pos[i] = LifetimePosition::MaxPosition(); |
| + } |
| for (int i = 0; i < active_live_ranges_.length(); ++i) { |
| LiveRange* range = active_live_ranges_[i]; |
| @@ -1839,30 +1858,46 @@ void LAllocator::AllocateBlockedReg(LiveRange* current) { |
| } |
| } |
| - int max_reg = 0; |
| + int reg = 0; |
| for (int i = 1; i < RegisterCount(); ++i) { |
| - if (use_pos[i].Value() > use_pos[max_reg].Value()) { |
| - max_reg = i; |
| + if (use_pos[i].Value() > use_pos[reg].Value()) { |
| + reg = i; |
| } |
| } |
| - UsePosition* first_usage = current->NextRegisterPosition(current->Start()); |
| - if (first_usage == NULL) { |
| - Spill(current); |
| - } else if (use_pos[max_reg].Value() < first_usage->pos().Value()) { |
| - SplitAndSpill(current, current->Start(), first_usage->pos()); |
| - } else { |
| - if (block_pos[max_reg].Value() < current->End().Value()) { |
| - // Split current before blocked position. |
| - LiveRange* second_range = Split(current, |
| - current->Start(), |
| - block_pos[max_reg]); |
| - AddToUnhandledSorted(second_range); |
| - } |
| + LifetimePosition pos = use_pos[reg]; |
| - current->set_assigned_register(max_reg, mode_ == XMM_REGISTERS); |
| - SplitAndSpillIntersecting(current); |
| + if (pos.Value() < register_use->pos().Value()) { |
| + // All registers are blocked before the first use that requires a register. |
|
Kevin Millikin (Chromium)
2010/12/10 07:39:19
that requires a register. Spill the starting part
Vyacheslav Egorov (Chromium)
2010/12/10 14:18:27
Done.
|
| + // Spill the starting part of the live range up to that use. |
| + // |
| + // Corner case: the first use position is equal to the start of the range. |
|
Kevin Millikin (Chromium)
2010/12/10 07:39:19
the first use...the start of the range
Vyacheslav Egorov (Chromium)
2010/12/10 14:18:27
Done.
|
| + // In this case we have nothing to spill and SpillBetween will just return |
| + // this range to the list of unhandled ones. This means that we have |
| + // unsatisfiable register constraints and we have to abort to avoid an |
| + // infinite loop. |
| + CHECK(current->Start().Value() < register_use->pos().Value()); |
|
Kevin Millikin (Chromium)
2010/12/10 07:39:19
We should consider what to do here. CHECK will cr
Vyacheslav Egorov (Chromium)
2010/12/10 14:18:27
I replaced check with assert.
Yes. Gracefully bai
|
| + SpillBetween(current, current->Start(), register_use->pos()); |
| + return; |
| } |
| + |
| + if (block_pos[reg].Value() < current->End().Value()) { |
| + // Register becomes blocked before current range end. Split before that |
| + // position. |
| + LiveRange* tail = SplitBetween(current, |
| + current->Start(), |
| + block_pos[reg].InstructionStart()); |
| + AddToUnhandledSorted(tail); |
| + } |
| + |
| + // Register reg is not blocked for the whole range. |
| + ASSERT(block_pos[reg].Value() >= current->End().Value()); |
| + TraceAlloc("Assigning reg %d to live range %d\n", reg, current->id()); |
| + current->set_assigned_register(reg, mode_); |
| + |
| + // This register was not free. Thus we need to find and spill |
| + // parts of active and inactive live regions that use the same register |
| + // at the same lifetime positions as current. |
| + SplitAndSpillIntersecting(current); |
| } |
| @@ -1875,9 +1910,9 @@ void LAllocator::SplitAndSpillIntersecting(LiveRange* current) { |
| if (range->assigned_register() == reg) { |
| UsePosition* next_pos = range->NextRegisterPosition(current->Start()); |
| if (next_pos == NULL) { |
| - SplitAndSpill(range, split_pos); |
| + SpillAfter(range, split_pos); |
| } else { |
| - SplitAndSpill(range, split_pos, next_pos->pos()); |
| + SpillBetween(range, split_pos, next_pos->pos()); |
| } |
| ActiveToHandled(range); |
| --i; |
| @@ -1892,10 +1927,10 @@ void LAllocator::SplitAndSpillIntersecting(LiveRange* current) { |
| if (next_intersection.IsValid()) { |
| UsePosition* next_pos = range->NextRegisterPosition(current->Start()); |
| if (next_pos == NULL) { |
| - SplitAndSpill(range, split_pos); |
| + SpillAfter(range, split_pos); |
| } else { |
| next_intersection = Min(next_intersection, next_pos->pos()); |
| - SplitAndSpill(range, split_pos, next_intersection); |
| + SpillBetween(range, split_pos, next_intersection); |
| } |
| InactiveToHandled(range); |
| --i; |
| @@ -1905,19 +1940,50 @@ void LAllocator::SplitAndSpillIntersecting(LiveRange* current) { |
| } |
| -LiveRange* LAllocator::Split(LiveRange* range, |
| - LifetimePosition start, |
| - LifetimePosition end) { |
| +bool LAllocator::IsBlockBoundary(LifetimePosition pos) { |
| + return pos.IsInstructionStart() && |
| + chunk_->instructions()->at(pos.InstructionIndex())->IsLabel(); |
| +} |
| + |
| + |
| +void LAllocator::AddGapMove(int pos, LiveRange* prev, LiveRange* next) { |
| + UsePosition* prev_pos = prev->AddUsePosition( |
| + LifetimePosition::FromInstructionIndex(pos)); |
| + UsePosition* next_pos = next->AddUsePosition( |
| + LifetimePosition::FromInstructionIndex(pos)); |
| + LOperand* prev_operand = prev_pos->operand(); |
| + LOperand* next_operand = next_pos->operand(); |
| + LGap* gap = chunk_->GetGapAt(pos); |
| + gap->GetOrCreateParallelMove(LGap::START)-> |
| + AddMove(prev_operand, next_operand); |
| + next_pos->set_hint(prev_operand); |
| +} |
| + |
| + |
| +LiveRange* LAllocator::SplitAt(LiveRange* range, LifetimePosition pos) { |
| ASSERT(!range->IsFixed()); |
| - TraceAlloc("Splitting live range %d in position between [%d, %d[\n", |
| + TraceAlloc("Splitting live range %d at %d\n", range->id(), pos.Value()); |
| + |
| + if (pos.Value() <= range->Start().Value()) return range; |
| + |
| + LiveRange* result = LiveRangeFor(next_virtual_register_++); |
| + range->SplitAt(pos, result); |
| + return result; |
| +} |
| + |
| + |
| +LiveRange* LAllocator::SplitBetween(LiveRange* range, |
| + LifetimePosition start, |
| + LifetimePosition end) { |
| + ASSERT(!range->IsFixed()); |
| + TraceAlloc("Splitting live range %d in position between [%d, %d]\n", |
| range->id(), |
| start.Value(), |
| end.Value()); |
| - LifetimePosition split_pos = FindOptimalSplitPos( |
| - start, end.PrevInstruction().InstructionEnd()); |
| + LifetimePosition split_pos = FindOptimalSplitPos(start, end); |
| ASSERT(split_pos.Value() >= start.Value()); |
| - return Split(range, split_pos); |
| + return SplitAt(range, split_pos); |
| } |
| @@ -1940,79 +2006,52 @@ LifetimePosition LAllocator::FindOptimalSplitPos(LifetimePosition start, |
| } |
| HBasicBlock* block = end_block; |
| - // Move to the most outside loop header. |
| + // Find header of outermost loop. |
| while (block->parent_loop_header() != NULL && |
| block->parent_loop_header()->block_id() > start_block->block_id()) { |
| block = block->parent_loop_header(); |
| } |
| - if (block == end_block) { |
| - return end; |
| - } |
| + if (block == end_block) return end; |
| return LifetimePosition::FromInstructionIndex( |
| block->first_instruction_index()); |
| } |
| -bool LAllocator::IsBlockBoundary(LifetimePosition pos) { |
| - return pos.IsInstructionStart() && |
| - chunk_->instructions()->at(pos.InstructionIndex())->IsLabel(); |
| +void LAllocator::SpillAfter(LiveRange* range, LifetimePosition pos) { |
| + LiveRange* second_part = SplitAt(range, pos); |
| + Spill(second_part); |
| } |
| -void LAllocator::AddGapMove(int pos, LiveRange* prev, LiveRange* next) { |
| - UsePosition* prev_pos = prev->AddUsePosition( |
| - LifetimePosition::FromInstructionIndex(pos)); |
| - UsePosition* next_pos = next->AddUsePosition( |
| - LifetimePosition::FromInstructionIndex(pos)); |
| - LOperand* prev_operand = prev_pos->operand(); |
| - LOperand* next_operand = next_pos->operand(); |
| - LGap* gap = chunk_->GetGapAt(pos); |
| - gap->GetOrCreateParallelMove(LGap::START)-> |
| - AddMove(prev_operand, next_operand); |
| - next_pos->set_hint(prev_operand); |
| -} |
| - |
| +void LAllocator::SpillBetween(LiveRange* range, |
| + LifetimePosition start, |
| + LifetimePosition end) { |
| + ASSERT(start.Value() < end.Value()); |
| + LiveRange* second_part = SplitAt(range, start); |
| -LiveRange* LAllocator::Split(LiveRange* range, LifetimePosition pos) { |
| - ASSERT(!range->IsFixed()); |
| - TraceAlloc("Splitting live range %d at %d\n", range->id(), pos.Value()); |
| - if (pos.Value() <= range->Start().Value()) { |
| - return range; |
| - } |
| - LiveRange* result = LiveRangeFor(next_virtual_register_++); |
| - range->SplitAt(pos, result); |
| - return result; |
| -} |
| + if (second_part->Start().Value() < end.Value()) { |
| + // Split result intersects with [start, end[. |
| + // Split it at position between ]start+1, end[, spill middle part |
| + // and put the rest to unhandled. |
| + LiveRange* third_part = SplitBetween( |
| + second_part, |
| + second_part->Start().InstructionEnd(), |
| + end.PrevInstruction().InstructionEnd()); |
| + ASSERT(third_part != second_part); |
| -void LAllocator::SplitAndSpill(LiveRange* range, |
| - LifetimePosition start, |
| - LifetimePosition end) { |
| - // We have an interval range and want to make sure that it is |
| - // spilled at start and at most spilled until end. |
| - ASSERT(start.Value() < end.Value()); |
| - LiveRange* tail_part = Split(range, start); |
| - if (tail_part->Start().Value() < end.Value()) { |
| - LiveRange* third_part = Split(tail_part, |
| - tail_part->Start().NextInstruction(), |
| - end); |
| - Spill(tail_part); |
| - ASSERT(third_part != tail_part); |
| + Spill(second_part); |
| AddToUnhandledSorted(third_part); |
| } else { |
| - AddToUnhandledSorted(tail_part); |
| + // Split result does not intersect with [start, end[. |
| + // Nothing to spill. Just put split result to unhandled as whole. |
| + AddToUnhandledSorted(second_part); |
| } |
| } |
| -void LAllocator::SplitAndSpill(LiveRange* range, LifetimePosition at) { |
| - LiveRange* second_part = Split(range, at); |
| - Spill(second_part); |
| -} |
| - |
| - |
| void LAllocator::Spill(LiveRange* range) { |
| ASSERT(!range->IsSpilled()); |
| TraceAlloc("Spilling live range %d\n", range->id()); |
| @@ -2020,7 +2059,7 @@ void LAllocator::Spill(LiveRange* range) { |
| if (!first->HasAllocatedSpillOperand()) { |
| LOperand* op = TryReuseSpillSlot(range); |
| - if (op == NULL) op = chunk_->GetNextSpillSlot(mode_ == XMM_REGISTERS); |
| + if (op == NULL) op = chunk_->GetNextSpillSlot(mode_ == DOUBLE_REGISTERS); |
| first->SetSpillOperand(op); |
| } |
| range->MakeSpilled(); |