| OLD | NEW |
| 1 // Copyright 2015 the V8 project authors. All rights reserved. | 1 // Copyright 2015 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/compiler/greedy-allocator.h" | 5 #include "src/compiler/greedy-allocator.h" |
| 6 #include "src/compiler/register-allocator.h" | 6 #include "src/compiler/register-allocator.h" |
| 7 | 7 |
| 8 namespace v8 { | 8 namespace v8 { |
| 9 namespace internal { | 9 namespace internal { |
| 10 namespace compiler { | 10 namespace compiler { |
| 11 | 11 |
| 12 | 12 |
| 13 #define TRACE(...) \ | 13 #define TRACE(...) \ |
| 14 do { \ | 14 do { \ |
| 15 if (FLAG_trace_alloc) PrintF(__VA_ARGS__); \ | 15 if (FLAG_trace_alloc) PrintF(__VA_ARGS__); \ |
| 16 } while (false) | 16 } while (false) |
| 17 | 17 |
| 18 | 18 |
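| // Multiplier boosting the weight of a range once it secures a register, |
| // making allocated ranges costlier to evict (cf. UpdateWeightAtEviction). |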
| 19 const float GreedyAllocator::kAllocatedRangeMultiplier = 10.0; | 19 const float GreedyAllocator::kAllocatedRangeMultiplier = 10.0; |
| 20 | 20 |
| 21 | |
| 22 namespace { | 21 namespace { |
| 23 | 22 |
| 24 | 23 |
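| // Propagate the assigned register to the range's use hints and, for phi |
| // ranges, to the phi map. |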
| 25 void UpdateOperands(LiveRange* range, RegisterAllocationData* data) { | 24 void UpdateOperands(LiveRange* range, RegisterAllocationData* data) { |
| 26 int reg_id = range->assigned_register(); | 25 int reg_id = range->assigned_register(); |
| 27 range->SetUseHints(reg_id); | 26 range->SetUseHints(reg_id); |
| 28 if (range->is_phi()) { | 27 if (range->is_phi()) { |
| 29 data->GetPhiMapValueFor(range->id())->set_assigned_register(reg_id); | 28 data->GetPhiMapValueFor(range->id())->set_assigned_register(reg_id); |
| 30 } | 29 } |
| 31 } | 30 } |
| 32 | 31 |
| 33 | 32 |
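| // Split the range before pos, returning the new child range covering the |
| // tail. When update_size is set, refresh the cached sizes of both pieces. |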
| 34 LiveRange* Split(LiveRange* range, RegisterAllocationData* data, | 33 LiveRange* Split(LiveRange* range, RegisterAllocationData* data, |
| 35 LifetimePosition pos) { | 34 LifetimePosition pos, bool update_size = true) { |
| 36 DCHECK(range->Start() < pos && pos < range->End()); | 35 DCHECK(range->Start() < pos && pos < range->End()); |
| 37 DCHECK(pos.IsStart() || pos.IsGapPosition() || | 36 DCHECK(pos.IsStart() || pos.IsGapPosition() || |
| 38 (data->code() | 37 (data->code() |
| 39 ->GetInstructionBlock(pos.ToInstructionIndex()) | 38 ->GetInstructionBlock(pos.ToInstructionIndex()) |
| 40 ->last_instruction_index() != pos.ToInstructionIndex())); | 39 ->last_instruction_index() != pos.ToInstructionIndex())); |
| 41 LiveRange* result = data->NewChildRangeFor(range); | 40 LiveRange* result = data->NewChildRangeFor(range); |
| 42 range->SplitAt(pos, result, data->allocation_zone()); | 41 range->SplitAt(pos, result, data->allocation_zone()); |
| 42 if (update_size) { |
| 43 range->UpdateSize(); |
| 44 result->UpdateSize(); |
| 45 } |
| 46 TRACE("Split range %d(v%d) @%d => %d.\n", range->id(), |
| 47 range->TopLevel()->id(), pos.ToInstructionIndex(), result->id()); |
| 43 return result; | 48 return result; |
| 44 } | 49 } |
| 45 | 50 |
| 46 | 51 |
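| // Spill the range to a stack slot, first assigning a spill range to its |
| // top-level range if one hasn't been assigned yet. |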
| 52 void SpillToStackSlot(LiveRange* range, RegisterAllocationData* data) { |
| 53 DCHECK(!range->spilled()); |
| 54 TRACE("Spilling live range %d(v%d)\n", range->id(), range->TopLevel()->id()); |
| 55 auto first = range->TopLevel(); |
| 56 if (first->HasNoSpillType()) { |
| 57 data->AssignSpillRangeToLiveRange(first); |
| 58 } |
| 59 range->Spill(); |
| 60 } |
| 61 |
| 62 |
| 47 // TODO(mtrofin): explain why splitting in gap START is always OK. | 63 // TODO(mtrofin): explain why splitting in gap START is always OK. |
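| // Return the gap position just before instruction_index, provided that |
| // position lies strictly inside the range; otherwise return an invalid |
| // position. |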
| 48 LifetimePosition GetSplitPositionForInstruction(const LiveRange* range, | 64 LifetimePosition GetSplitPositionForInstruction(const LiveRange* range, |
| 49 const InstructionSequence* code, | 65 const InstructionSequence* code, |
| 50 int instruction_index) { | 66 int instruction_index) { |
| 51 LifetimePosition ret = LifetimePosition::Invalid(); | 67 LifetimePosition ret = LifetimePosition::Invalid(); |
| 52 | 68 |
| 53 ret = LifetimePosition::GapFromInstructionIndex(instruction_index); | 69 ret = LifetimePosition::GapFromInstructionIndex(instruction_index); |
| 54 if (range->Start() >= ret || ret >= range->End()) { | 70 if (range->Start() >= ret || ret >= range->End()) { |
| 55 return LifetimePosition::Invalid(); | 71 return LifetimePosition::Invalid(); |
| 56 } | 72 } |
| 57 return ret; | 73 return ret; |
| 58 } | 74 } |
| 59 | 75 |
| 60 | 76 |
| 61 int GetFirstGapIndex(const UseInterval* interval) { | 77 int GetFirstGapIndex(const UseInterval* interval) { |
| 62 LifetimePosition start = interval->start(); | 78 LifetimePosition start = interval->start(); |
| 63 int ret = start.ToInstructionIndex(); | 79 int ret = start.ToInstructionIndex(); |
| 64 return ret; | 80 return ret; |
| 65 } | 81 } |
| 66 | 82 |
| 67 | 83 |
| 68 int GetLastGapIndex(const UseInterval* interval) { | 84 int GetLastGapIndex(const UseInterval* interval) { |
| 69 LifetimePosition end = interval->end(); | 85 LifetimePosition end = interval->end(); |
| 70 return end.ToInstructionIndex(); | 86 return end.ToInstructionIndex(); |
| 71 } | 87 } |
| 72 | 88 |
| 73 | 89 |
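| // Index of the first instruction fully covered by the interval. An interval |
| // starting at an instruction's END position only covers the next one. |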
| 90 int GetFirstInstructionIndex(const UseInterval* interval) { |
| 91 int ret = interval->start().ToInstructionIndex(); |
| 92 if (!interval->start().IsGapPosition() && !interval->start().IsStart()) { |
| 93 ++ret; |
| 94 } |
| 95 return ret; |
| 96 } |
| 97 |
| 98 |
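| // Index of the last instruction fully covered by the interval. An interval |
| // ending at a gap or at an instruction START doesn't cover that instruction. |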
| 99 int GetLastInstructionIndex(const UseInterval* interval) { |
| 100 int ret = interval->end().ToInstructionIndex(); |
| 101 if (interval->end().IsGapPosition() || interval->end().IsStart()) { |
| 102 // The end is a gap position or an instruction start; step back one. |
| 103 --ret; |
| 104 } |
| 105 return ret; |
| 106 } |
| 107 |
| 108 |
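| // Find the split position closest after the call at call_pos. For a |
| // throwing call at the end of a block, that is the start of the closest |
| // successor block rather than call_pos + 1. |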
| 109 LifetimePosition GetFirstSplitPosFollowingCall(const LiveRange* range, |
| 110 const InstructionSequence* code, |
| 111 int call_pos) { |
| 112 int next_pos = call_pos + 1; |
| 113 const InstructionBlock* block = code->GetInstructionBlock(call_pos); |
| 114 if (next_pos == block->last_instruction_index()) { |
| 115 // This may be a throwing call. Look through the successors for a |
| 116 // handler. If there is one, then we pick the next position as the |
| 117 // closest block start out of the successors: |
| 118 // - if the next block is the handler, then splitting at the first position |
| 119 // ensures a "fill" move instruction. The normal control flow block will |
| 120 // fail LiveRangeConnector::CanEagerlyResolveControlFlow during the |
| 121 // LiveRangeConnector::ResolveControlFlow phase, which will ensure a |
| 122 // fill move for it, too. |
| 123 // - if the next block is the normal flow, then the same argument as above |
| 124 // holds, but reversing the block roles. |
| 125 // If there is no handler, just use call_pos + 1. |
| 126 int outside_pos = static_cast<int>(code->instructions().size()); |
| 127 int alternative_next_pos = outside_pos; |
| 128 bool found_handler = false; |
| 129 |
| 130 for (RpoNumber successor_id : block->successors()) { |
| 131 const InstructionBlock* successor = |
| 132 code->InstructionBlockAt(successor_id); |
| 133 if (successor->IsHandler()) { |
| 134 DCHECK(!found_handler); |
| 135 found_handler = true; |
| 136 } |
| 137 int first_succ_instruction = successor->first_instruction_index(); |
| 138 alternative_next_pos = Min(alternative_next_pos, first_succ_instruction); |
| 139 } |
| 140 if (found_handler) { |
| 141 DCHECK_NE(outside_pos, alternative_next_pos); |
| 142 next_pos = alternative_next_pos; |
| 143 } |
| 144 } |
| 145 return GetSplitPositionForInstruction(range, code, next_pos); |
| 146 } |
| 147 |
| 148 |
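| // Whether the instruction clobbers the range's register. Conservatively, |
| // only calls are considered clobbering, regardless of the range. |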
| 149 bool DoesInstructionClobberRange(const LiveRange* range, |
| 150 const Instruction* instruction) { |
| 151 return instruction->IsCall(); |
| 152 } |
| 153 |
| 154 |
| 155 // Split range just before the instr_index, and then at the closest position |
| 156 // the control flow resumes, and return the range after that position, if any. |
| 157 LiveRange* SplitRangeAtClobberingInstruction(LiveRange* range, int index, |
| 158 RegisterAllocationData* data) { |
| 159 LiveRange* ret = nullptr; |
| 160 const InstructionSequence* code = data->code(); |
| 161 |
| 162 LifetimePosition before_call = |
| 163 GetSplitPositionForInstruction(range, code, index); |
| 164 LiveRange* call_part = nullptr; |
| 165 if (before_call.IsValid()) { |
| 166 call_part = Split(range, data, before_call, false); |
| 167 } else { |
| 168 // This range starts with the call. |
| 169 call_part = range; |
| 170 } |
| 171 |
| 172 LifetimePosition after_call = |
| 173 GetFirstSplitPosFollowingCall(call_part, code, index); |
| 174 if (after_call.IsValid()) { |
| 175 ret = Split(call_part, data, after_call, false); |
| 176 } |
| 177 SpillToStackSlot(call_part, data); |
| 178 return ret; |
| 179 } |
| 180 |
| 181 |
| 182 // Split before a call that requires parameters on stack, and spill until |
| 183 // the first position requiring the parameter back in a register. |
| 184 // The range must not be fixed. |
| 185 void SplitRangeAroundClobberingInstructions(LiveRange* range, |
| 186 RegisterAllocationData* data) { |
| 187 DCHECK(!range->IsFixed()); |
| 189 const InstructionSequence* code = data->code(); |
| 190 |
| 191 LiveRange* current_subrange = range; |
| 192 int top_id = range->id(); |
| 193 UseInterval* interval = current_subrange->first_interval(); |
| 194 while (interval != nullptr) { |
| 195 int first_index = GetFirstInstructionIndex(interval); |
| 196 int last_index = GetLastInstructionIndex(interval); |
| 197 interval = interval->next(); |
| 198 |
| 199 for (int index = first_index; index <= last_index; ++index) { |
| 200 const Instruction* instr = code->InstructionAt(index); |
| 201 if (DoesInstructionClobberRange(current_subrange, instr)) { |
| 202 TRACE("Range %d is clobbered by instruction at index %d.\n", top_id, |
| 203 index); |
| 204 current_subrange = |
| 205 SplitRangeAtClobberingInstruction(current_subrange, index, data); |
| 206 interval = current_subrange == nullptr |
| 207 ? nullptr |
| 208 : current_subrange->first_interval(); |
| 209 break; |
| 210 } |
| 211 } |
| 212 } |
| 213 } |
| 214 |
| 215 |
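| // Whether any instruction in the index range [start, stop] is a call. |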
| 216 bool DoesSubsequenceClobber(const InstructionSequence* code, int start, |
| 217 int stop) { |
| 218 for (int i = start; i <= stop; ++i) { |
| 219 if (code->InstructionAt(i)->IsCall()) return true; |
| 220 } |
| 221 return false; |
| 222 } |
| 223 |
| 224 |
| 225 // If the block has a throwing call, when the control flow takes the exception |
| 226 // handler path, the last instruction in the block - a jump to the normal |
| 227 // execution path - isn't executed, and so neither are any moves there. |
| 228 // So we split at the closest successor instead. |
| 229 LiveRange* SplitRangeAfterBlock(LiveRange* range, RegisterAllocationData* data, |
| 230 const InstructionBlock* block) { |
| 231 const InstructionSequence* code = data->code(); |
| 232 int instr_index = block->last_instruction_index(); |
| 233 int outside_index = static_cast<int>(code->instructions().size()); |
| 234 bool has_handler = false; |
| 235 for (auto successor_id : block->successors()) { |
| 236 const InstructionBlock* successor = code->InstructionBlockAt(successor_id); |
| 237 if (successor->IsHandler()) { |
| 238 has_handler = true; |
| 239 } |
| 240 outside_index = Min(outside_index, successor->first_instruction_index()); |
| 241 } |
| 242 if (!has_handler) { |
| 243 // We can split at the end of the block to ensure the fill happens here. |
| 244 // However, we still need to split again at the beginning of the closest |
| 245 // successor so that the control flow connector doesn't attempt to use the |
| 246 // locally spilled slot. |
| 247 LifetimePosition at_block_end = |
| 248 GetSplitPositionForInstruction(range, code, instr_index); |
| 249 if (!at_block_end.IsValid()) return nullptr; |
| 250 range = Split(range, data, at_block_end); |
| 251 } |
| 252 instr_index = outside_index; |
| 253 |
| 254 LifetimePosition after_block = |
| 255 GetSplitPositionForInstruction(range, code, instr_index); |
| 256 |
| 257 if (after_block.IsValid()) { |
| 258 return Split(range, data, after_block); |
| 259 } else { |
| 260 return nullptr; |
| 261 } |
| 262 } |
| 263 |
| 264 |
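| // Split the range around each deferred block whose instructions include a |
| // call, so the pieces covering such blocks can be handled separately. |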
| 265 void SplitRangeByClobberingDeferredBlocks(LiveRange* range, |
| 266 RegisterAllocationData* data) { |
| 267 DCHECK(!range->IsFixed()); |
| 268 DCHECK(!range->spilled()); |
| 269 |
| 270 const InstructionSequence* code = data->code(); |
| 271 LiveRange* current_subrange = range; |
| 272 |
| 273 UseInterval* interval = current_subrange->first_interval(); |
| 274 |
| 275 while (interval != nullptr) { |
| 276 int first_index = GetFirstInstructionIndex(interval); |
| 277 int last_index = interval->end().ToInstructionIndex(); |
| 278 |
| 279 if (last_index >= static_cast<int>(code->instructions().size())) { |
| 280 last_index = static_cast<int>(code->instructions().size() - 1); |
| 281 } |
| 282 interval = interval->next(); |
| 283 |
| 284 for (int index = first_index; index <= last_index;) { |
| 285 const InstructionBlock* block = code->GetInstructionBlock(index); |
| 286 int working_index = index; |
| 287 index = block->last_instruction_index() + 1; |
| 288 |
| 289 if (!block->IsDeferred() || |
| 290 !DoesSubsequenceClobber( |
| 291 code, working_index, |
| 292 Min(last_index, block->last_instruction_index()))) { |
| 293 continue; |
| 294 } |
| 295 |
| 296 TRACE("Deferred block B%d clobbers range %d(v%d).\n", |
| 297 block->rpo_number().ToInt(), current_subrange->id(), |
| 298 current_subrange->TopLevel()->id()); |
| 299 LifetimePosition block_start = |
| 300 GetSplitPositionForInstruction(current_subrange, code, working_index); |
| 301 LiveRange* block_and_after = nullptr; |
| 302 if (block_start.IsValid()) { |
| 303 block_and_after = Split(current_subrange, data, block_start); |
| 304 } else { |
| 305 block_and_after = current_subrange; |
| 306 } |
| 307 LiveRange* after_block = |
| 308 SplitRangeAfterBlock(block_and_after, data, block); |
| 309 if (after_block == nullptr && block_and_after != current_subrange) { |
| 310 after_block = block_and_after; |
| 311 } |
| 312 current_subrange = after_block; |
| 313 interval = current_subrange == nullptr |
| 314 ? nullptr |
| 315 : current_subrange->first_interval(); |
| 316 break; |
| 317 } |
| 318 } |
| 319 } |
| 320 |
| 321 |
| 74 // Basic heuristic for advancing the algorithm, if any other splitting heuristic | 322 // Basic heuristic for advancing the algorithm, if any other splitting heuristic |
| 75 // failed. | 323 // failed. |
| 76 LifetimePosition GetLastResortSplitPosition(const LiveRange* range, | 324 LifetimePosition GetLastResortSplitPosition(const LiveRange* range, |
| 77 const InstructionSequence* code) { | 325 const InstructionSequence* code) { |
| 78 if (range->first_interval()->next() != nullptr) { | 326 if (range->first_interval()->next() != nullptr) { |
| 79 return range->first_interval()->next()->start(); | 327 return range->first_interval()->next()->start(); |
| 80 } | 328 } |
| 81 | 329 |
| 82 UseInterval* interval = range->first_interval(); | 330 UseInterval* interval = range->first_interval(); |
| 83 int first = GetFirstGapIndex(interval); | 331 int first = GetFirstGapIndex(interval); |
| 84 int last = GetLastGapIndex(interval); | 332 int last = GetLastGapIndex(interval); |
| 85 if (first == last) return LifetimePosition::Invalid(); | 333 if (first == last) return LifetimePosition::Invalid(); |
| 86 | 334 |
| 87 // TODO(mtrofin:) determine why we can't just split somewhere arbitrary | 335 // TODO(mtrofin): determine why we can't just split somewhere arbitrary |
| 88 // within the range, e.g. it's middle. | 336 // within the range, e.g. its middle. |
| 89 for (UsePosition* pos = range->first_pos(); pos != nullptr; | 337 for (UsePosition* pos = range->first_pos(); pos != nullptr; |
| 90 pos = pos->next()) { | 338 pos = pos->next()) { |
| 91 if (pos->type() != UsePositionType::kRequiresRegister) continue; | 339 if (pos->type() != UsePositionType::kRequiresRegister) continue; |
| 92 LifetimePosition before = GetSplitPositionForInstruction( | 340 LifetimePosition before = GetSplitPositionForInstruction( |
| 93 range, code, pos->pos().ToInstructionIndex()); | 341 range, code, pos->pos().ToInstructionIndex()); |
| 94 if (before.IsValid()) return before; | 342 if (before.IsValid()) return before; |
| 95 LifetimePosition after = GetSplitPositionForInstruction( | 343 LifetimePosition after = GetSplitPositionForInstruction( |
| 96 range, code, pos->pos().ToInstructionIndex() + 1); | 344 range, code, pos->pos().ToInstructionIndex() + 1); |
| 97 if (after.IsValid()) return after; | 345 if (after.IsValid()) return after; |
| 98 } | 346 } |
| (...skipping 11 matching lines...) |
| 110 | 358 |
| 111 AllocationCandidate AllocationScheduler::GetNext() { | 359 AllocationCandidate AllocationScheduler::GetNext() { |
| 112 DCHECK(!queue_.empty()); | 360 DCHECK(!queue_.empty()); |
| 113 AllocationCandidate ret = queue_.top(); | 361 AllocationCandidate ret = queue_.top(); |
| 114 queue_.pop(); | 362 queue_.pop(); |
| 115 return ret; | 363 return ret; |
| 116 } | 364 } |
| 117 | 365 |
| 118 | 366 |
| 119 void AllocationScheduler::Schedule(LiveRange* range) { | 367 void AllocationScheduler::Schedule(LiveRange* range) { |
| 120 TRACE("Scheduling live range %d.\n", range->id()); | 368 TRACE("Scheduling live range %d(v%d).\n", range->id(), |
| 369 range->TopLevel()->id()); |
| 370 DCHECK(range->IsSizeValid()); |
| 121 queue_.push(AllocationCandidate(range)); | 371 queue_.push(AllocationCandidate(range)); |
| 122 } | 372 } |
| 123 | 373 |
| 124 GreedyAllocator::GreedyAllocator(RegisterAllocationData* data, | 374 GreedyAllocator::GreedyAllocator(RegisterAllocationData* data, |
| 125 RegisterKind kind, Zone* local_zone) | 375 RegisterKind kind, Zone* local_zone) |
| 126 : RegisterAllocator(data, kind), | 376 : RegisterAllocator(data, kind), |
| 127 local_zone_(local_zone), | 377 local_zone_(local_zone), |
| 128 allocations_(local_zone), | 378 allocations_(local_zone), |
| 129 scheduler_(local_zone) {} | 379 scheduler_(local_zone) {} |
| 130 | 380 |
| 131 | 381 |
| 132 void GreedyAllocator::AssignRangeToRegister(int reg_id, LiveRange* range) { | 382 void GreedyAllocator::AssignRangeToRegister(int reg_id, LiveRange* range) { |
| 133 TRACE("Assigning register %s to live range %d\n", RegisterName(reg_id), | 383 TRACE("Assigning register %s to live range %d(v%d).\n", RegisterName(reg_id), |
| 134 range->id()); | 384 range->id(), range->TopLevel()->id()); |
| 135 | 385 |
| 136 DCHECK(!range->HasRegisterAssigned()); | 386 DCHECK(!range->HasRegisterAssigned()); |
| 137 | 387 |
| 138 AllocateRegisterToRange(reg_id, range); | 388 AllocateRegisterToRange(reg_id, range); |
| 139 | 389 |
| 140 TRACE("Assigning %s to range %d\n", RegisterName(reg_id), range->id()); | 390 TRACE("Assigning %s to range %d(v%d).\n", RegisterName(reg_id), range->id(), |
| 391 range->TopLevel()->id()); |
| 141 range->set_assigned_register(reg_id); | 392 range->set_assigned_register(reg_id); |
| 142 } | 393 } |
| 143 | 394 |
| 144 | 395 |
| 145 void GreedyAllocator::PreallocateFixedRanges() { | 396 void GreedyAllocator::PreallocateFixedRanges() { |
| 146 allocations_.resize(num_registers()); | 397 allocations_.resize(num_registers()); |
| 147 for (int i = 0; i < num_registers(); i++) { | 398 for (int i = 0; i < num_registers(); i++) { |
| 148 allocations_[i] = new (local_zone()) CoalescedLiveRanges(local_zone()); | 399 allocations_[i] = new (local_zone()) CoalescedLiveRanges(local_zone()); |
| 149 } | 400 } |
| 150 | 401 |
| 151 for (LiveRange* fixed_range : GetFixedRegisters()) { | 402 for (LiveRange* fixed_range : GetFixedRegisters()) { |
| 152 if (fixed_range != nullptr) { | 403 if (fixed_range != nullptr) { |
| 153 DCHECK_EQ(mode(), fixed_range->kind()); | 404 DCHECK_EQ(mode(), fixed_range->kind()); |
| 154 DCHECK(fixed_range->IsFixed()); | 405 DCHECK(fixed_range->IsFixed()); |
| 155 | 406 |
| 156 int reg_nr = fixed_range->assigned_register(); | 407 int reg_nr = fixed_range->assigned_register(); |
| 157 EnsureValidRangeWeight(fixed_range); | 408 EnsureValidRangeWeight(fixed_range); |
| 158 AllocateRegisterToRange(reg_nr, fixed_range); | 409 AllocateRegisterToRange(reg_nr, fixed_range); |
| 159 } | 410 } |
| 160 } | 411 } |
| 161 } | 412 } |
| 162 | 413 |
| 163 | 414 |
| 164 void GreedyAllocator::ScheduleAllocationCandidates() { | 415 void GreedyAllocator::ScheduleAllocationCandidates() { |
| 165 for (auto range : data()->live_ranges()) { | 416 for (auto range : data()->live_ranges()) { |
| 166 if (CanProcessRange(range) && !range->spilled()) { | 417 if (CanProcessRange(range) && !range->spilled()) { |
| 418 range->UpdateSize(); |
| 167 scheduler().Schedule(range); | 419 scheduler().Schedule(range); |
| 168 } | 420 } |
| 169 } | 421 } |
| 170 } | 422 } |
| 171 | 423 |
| 172 | 424 |
| 173 void GreedyAllocator::TryAllocateCandidate( | 425 void GreedyAllocator::TryAllocateCandidate( |
| 174 const AllocationCandidate& candidate) { | 426 const AllocationCandidate& candidate) { |
| 175 // At this point, this is just a live range. TODO: groups. | 427 // At this point, this is just a live range. TODO: groups. |
| 176 TryAllocateLiveRange(candidate.live_range()); | 428 TryAllocateLiveRange(candidate.live_range()); |
| 177 } | 429 } |
| 178 | 430 |
| 179 | 431 |
| 180 void GreedyAllocator::TryAllocateLiveRange(LiveRange* range) { | 432 void GreedyAllocator::TryAllocateLiveRange(LiveRange* range) { |
| 181 // TODO(mtrofin): once we introduce groups, we'll want to first try and | 433 // TODO(mtrofin): once we introduce groups, we'll want to first try and |
| 182 // allocate at the preferred register. | 434 // allocate at the preferred register. |
| 183 TRACE("Attempting to allocate live range %d\n", range->id()); | 435 TRACE("Attempting to allocate live range %d(v%d).\n", range->id(), |
| 436 range->TopLevel()->id()); |
| 184 int free_reg = -1; | 437 int free_reg = -1; |
| 185 int evictable_reg = -1; | 438 int evictable_reg = -1; |
| 186 EnsureValidRangeWeight(range); | 439 EnsureValidRangeWeight(range); |
| 187 DCHECK(range->weight() != LiveRange::kInvalidWeight); | 440 DCHECK(range->weight() != LiveRange::kInvalidWeight); |
| 188 | 441 |
| 189 float smallest_weight = LiveRange::kMaxWeight; | 442 float smallest_weight = LiveRange::kMaxWeight; |
| 190 | 443 |
| 191 // Seek either the first free register, or, from the set of registers | 444 // Seek either the first free register, or, from the set of registers |
| 192 // where the maximum conflict is lower than the candidate's weight, the one | 445 // where the maximum conflict is lower than the candidate's weight, the one |
| 193 // with the smallest such weight. | 446 // with the smallest such weight. |
| 194 for (int i = 0; i < num_registers(); i++) { | 447 for (int i = 0; i < num_registers(); i++) { |
| 195 float max_conflict_weight = GetMaximumConflictingWeight(i, range); | 448 float max_conflict_weight = GetMaximumConflictingWeight(i, range); |
| 196 if (max_conflict_weight == LiveRange::kInvalidWeight) { | 449 if (max_conflict_weight == LiveRange::kInvalidWeight) { |
| 197 free_reg = i; | 450 free_reg = i; |
| 198 break; | 451 break; |
| 199 } | 452 } |
| 200 if (max_conflict_weight < range->weight() && | 453 if (max_conflict_weight < range->weight() && |
| 201 max_conflict_weight < smallest_weight) { | 454 max_conflict_weight < smallest_weight) { |
| 202 smallest_weight = max_conflict_weight; | 455 smallest_weight = max_conflict_weight; |
| 203 evictable_reg = i; | 456 evictable_reg = i; |
| 204 } | 457 } |
| 205 } | 458 } |
| 206 | 459 |
| 207 // We have a free register, so we use it. | 460 // We have a free register, so we use it. |
| 208 if (free_reg >= 0) { | 461 if (free_reg >= 0) { |
| 209 TRACE("Found free register %s for live range %d\n", RegisterName(free_reg), | 462 TRACE("Found free register %s for live range %d.\n", RegisterName(free_reg), |
| 210 range->id()); | 463 range->id()); |
| 211 AssignRangeToRegister(free_reg, range); | 464 AssignRangeToRegister(free_reg, range); |
| 212 return; | 465 return; |
| 213 } | 466 } |
| 214 | 467 |
| 215 // We found a register to perform evictions, so we evict and allocate our | 468 // We found a register to perform evictions, so we evict and allocate our |
| 216 // candidate. | 469 // candidate. |
| 217 if (evictable_reg >= 0) { | 470 if (evictable_reg >= 0) { |
| 218 TRACE("Found evictable register %s for live range %d\n", | 471 TRACE("Found evictable register %s for live range %d.\n", |
| 219 RegisterName(free_reg), range->id()); | 472 RegisterName(evictable_reg), range->id()); |
| 220 EvictAndRescheduleConflicts(evictable_reg, range); | 473 EvictAndRescheduleConflicts(evictable_reg, range); |
| 221 AssignRangeToRegister(evictable_reg, range); | 474 AssignRangeToRegister(evictable_reg, range); |
| 222 return; | 475 return; |
| 223 } | 476 } |
| 224 | 477 |
| 225 // The range needs to be split or spilled. | 478 // The range needs to be split or spilled. |
| 226 SplitOrSpillBlockedRange(range); | 479 SplitOrSpillBlockedRange(range); |
| 227 } | 480 } |
| 228 | 481 |
| 229 | 482 |
| 230 void GreedyAllocator::EvictAndRescheduleConflicts(unsigned reg_id, | 483 void GreedyAllocator::EvictAndRescheduleConflicts(unsigned reg_id, |
| 231 const LiveRange* range) { | 484 const LiveRange* range) { |
| 232 auto conflicts = current_allocations(reg_id)->GetConflicts(range); | 485 auto conflicts = current_allocations(reg_id)->GetConflicts(range); |
| 233 for (LiveRange* conflict = conflicts.Current(); conflict != nullptr; | 486 for (LiveRange* conflict = conflicts.Current(); conflict != nullptr; |
| 234 conflict = conflicts.RemoveCurrentAndGetNext()) { | 487 conflict = conflicts.RemoveCurrentAndGetNext()) { |
| 235 DCHECK(conflict->HasRegisterAssigned()); | 488 DCHECK(conflict->HasRegisterAssigned()); |
| 236 CHECK(!conflict->IsFixed()); | 489 CHECK(!conflict->IsFixed()); |
| 237 conflict->UnsetAssignedRegister(); | 490 conflict->UnsetAssignedRegister(); |
| 238 UpdateWeightAtEviction(conflict); | 491 UpdateWeightAtEviction(conflict); |
| 239 scheduler().Schedule(conflict); | 492 scheduler().Schedule(conflict); |
| 240 TRACE("Evicted range %d.\n", conflict->id()); | 493 TRACE("Evicted range %d(v%d).\n", conflict->id(), |
| 494 conflict->TopLevel()->id()); |
| 241 } | 495 } |
| 242 } | 496 } |
| 243 | 497 |
| 244 | 498 |
| 245 void GreedyAllocator::SplitAndSpillRangesDefinedByMemoryOperand() { | 499 void GreedyAllocator::SplitAndSpillRangesDefinedByMemoryOperand() { |
| 246 size_t initial_range_count = data()->live_ranges().size(); | 500 size_t initial_range_count = data()->live_ranges().size(); |
| 247 for (size_t i = 0; i < initial_range_count; ++i) { | 501 for (size_t i = 0; i < initial_range_count; ++i) { |
| 248 auto range = data()->live_ranges()[i]; | 502 auto range = data()->live_ranges()[i]; |
| 249 if (!CanProcessRange(range)) continue; | 503 if (!CanProcessRange(range)) continue; |
| 250 if (range->HasNoSpillType()) continue; | 504 if (!range->HasSpillOperand()) continue; |
| 505 if (range->IsChild()) continue; |
| 251 | 506 |
| 252 LifetimePosition start = range->Start(); | 507 LifetimePosition start = range->Start(); |
| 253 TRACE("Live range %d is defined by a spill operand.\n", range->id()); | 508 TRACE("Live range %d is defined by a spill operand.\n", range->id()); |
| 254 auto next_pos = start; | 509 auto next_pos = start; |
| 255 if (next_pos.IsGapPosition()) { | 510 if (next_pos.IsGapPosition()) { |
| 256 next_pos = next_pos.NextStart(); | 511 next_pos = next_pos.NextStart(); |
| 257 } | 512 } |
| 258 auto pos = range->NextUsePositionRegisterIsBeneficial(next_pos); | 513 auto pos = range->NextUsePositionRegisterIsBeneficial(next_pos); |
| 259 // If the range already has a spill operand and it doesn't need a | 514 // If the range already has a spill operand and it doesn't need a |
| 260 // register immediately, split it and spill the first part of the range. | 515 // register immediately, split it and spill the first part of the range. |
| 261 if (pos == nullptr) { | 516 if (pos == nullptr) { |
| 262 Spill(range); | 517 SpillToStackSlot(range, data()); |
| 263 } else if (pos->pos() > range->Start().NextStart()) { | 518 } else if (pos->pos() > range->Start().NextStart()) { |
| 264 // Do not spill live range eagerly if use position that can benefit from | 519 // Do not spill live range eagerly if use position that can benefit from |
| 265 // the register is too close to the start of live range. | 520 // the register is too close to the start of live range. |
| 266 auto split_pos = pos->pos(); | 521 auto split_pos = GetSplitPositionForInstruction( |
| 267 if (data()->IsBlockBoundary(split_pos.Start())) { | 522 range, code(), pos->pos().ToInstructionIndex()); |
| 268 split_pos = split_pos.Start(); | 523 if (split_pos.IsValid()) { |
| 269 } else { | 524 Split(range, data(), split_pos); |
| 270 split_pos = split_pos.PrevStart().End(); | 525 SpillToStackSlot(range, data()); |
| 271 } | 526 } |
| 272 Split(range, data(), split_pos); | |
| 273 Spill(range); | |
| 274 } | 527 } |
| 275 } | 528 } |
| 276 } | 529 } |
| 277 | 530 |
| 278 | 531 |
| 279 void GreedyAllocator::AllocateRegisters() { | 532 void GreedyAllocator::AllocateRegisters() { |
| 280 CHECK(scheduler().empty()); | 533 CHECK(scheduler().empty()); |
| 281 CHECK(allocations_.empty()); | 534 CHECK(allocations_.empty()); |
| 282 | 535 |
| 283 TRACE("Begin allocating function %s with the Greedy Allocator\n", | 536 TRACE("Begin allocating function %s with the Greedy Allocator\n", |
| 284 data()->debug_name()); | 537 data()->debug_name()); |
| 285 | 538 |
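| // First, split top-level ranges around deferred blocks that contain calls. |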
| 539 size_t live_range_count = data()->live_ranges().size(); |
| 540 for (size_t i = 0; i < live_range_count; i++) { |
| 541 LiveRange* range = data()->live_ranges()[i]; |
| 542 if (CanProcessRange(range) && !range->spilled() && !range->IsFixed() && |
| 543 !range->IsChild()) { |
| 544 SplitRangeByClobberingDeferredBlocks(range, data()); |
| 545 } |
| 546 } |
| 547 |
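| // Then split ranges around individual clobbering (call) instructions, |
| // spilling the piece that overlaps the call. |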
| 548 live_range_count = data()->live_ranges().size(); |
| 549 for (size_t i = 0; i < live_range_count; i++) { |
| 550 LiveRange* range = data()->live_ranges()[i]; |
| 551 if (CanProcessRange(range) && !range->spilled() && !range->IsFixed()) { |
| 552 SplitRangeAroundClobberingInstructions(range, data()); |
| 553 } |
| 554 } |
| 555 |
| 286 SplitAndSpillRangesDefinedByMemoryOperand(); | 556 SplitAndSpillRangesDefinedByMemoryOperand(); |
| 557 |
| 287 PreallocateFixedRanges(); | 558 PreallocateFixedRanges(); |
| 288 ScheduleAllocationCandidates(); | 559 ScheduleAllocationCandidates(); |
| 289 | 560 |
| 290 while (!scheduler().empty()) { | 561 while (!scheduler().empty()) { |
| 291 AllocationCandidate candidate = scheduler().GetNext(); | 562 AllocationCandidate candidate = scheduler().GetNext(); |
| 292 TryAllocateCandidate(candidate); | 563 TryAllocateCandidate(candidate); |
| 293 } | 564 } |
| 294 | 565 |
| 295 | 566 |
| 296 // We do not rely on the hint mechanism used by LinearScan, so no need to | 567 // We do not rely on the hint mechanism used by LinearScan, so no need to |
| (...skipping 44 matching lines...) |
| 341 } | 612 } |
| 342 if (!IsProgressPossible(range, code())) { | 613 if (!IsProgressPossible(range, code())) { |
| 343 range->set_weight(LiveRange::kMaxWeight); | 614 range->set_weight(LiveRange::kMaxWeight); |
| 344 return; | 615 return; |
| 345 } | 616 } |
| 346 | 617 |
| 347 float use_count = 0.0; | 618 float use_count = 0.0; |
| 348 for (auto pos = range->first_pos(); pos != nullptr; pos = pos->next()) { | 619 for (auto pos = range->first_pos(); pos != nullptr; pos = pos->next()) { |
| 349 ++use_count; | 620 ++use_count; |
| 350 } | 621 } |
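| // The weight is the range's use density: use positions per unit of size. |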
| 351 range->set_weight(use_count / static_cast<float>(range->GetSize())); | 622 DCHECK(range->IsSizeValid()); |
| 623 range->set_weight(use_count / static_cast<float>(range->size())); |
| 352 } | 624 } |
| 353 | 625 |
| 354 | 626 |
| 355 void GreedyAllocator::SpillRangeAsLastResort(LiveRange* range) { | 627 void GreedyAllocator::SpillRangeAsLastResort(LiveRange* range) { |
| 356 LifetimePosition start = range->Start(); | 628 LifetimePosition start = range->Start(); |
| 357 CHECK(range->CanBeSpilled(start)); | 629 CHECK(range->CanBeSpilled(start)); |
| 358 | 630 |
| 359 DCHECK(range->NextRegisterPosition(start) == nullptr); | 631 DCHECK(range->NextRegisterPosition(start) == nullptr); |
| 360 Spill(range); | 632 SpillToStackSlot(range, data()); |
| 361 } | 633 } |
| 362 | 634 |
| 363 | 635 |
| 364 void GreedyAllocator::SplitOrSpillBlockedRange(LiveRange* range) { | 636 void GreedyAllocator::SplitOrSpillBlockedRange(LiveRange* range) { |
| 365 // TODO(mtrofin): replace the call below with the entry point selecting | 637 // TODO(mtrofin): replace the call below with the entry point selecting |
| 366 // heuristics, once they exist, out of which GLRSP is the last one. | 638 // heuristics, once they exist, out of which GLRSP is the last one. |
| 367 auto pos = GetLastResortSplitPosition(range, code()); | 639 auto pos = GetLastResortSplitPosition(range, code()); |
| 368 if (pos.IsValid()) { | 640 if (pos.IsValid()) { |
| 369 LiveRange* tail = Split(range, data(), pos); | 641 LiveRange* tail = Split(range, data(), pos); |
| 370 DCHECK(tail != range); | 642 DCHECK(tail != range); |
| 371 scheduler().Schedule(tail); | 643 scheduler().Schedule(tail); |
| 372 scheduler().Schedule(range); | 644 scheduler().Schedule(range); |
| 373 return; | 645 return; |
| 374 } | 646 } |
| 375 SpillRangeAsLastResort(range); | 647 SpillRangeAsLastResort(range); |
| 376 } | 648 } |
| 377 | 649 |
| 378 | 650 |
| 379 } // namespace compiler | 651 } // namespace compiler |
| 380 } // namespace internal | 652 } // namespace internal |
| 381 } // namespace v8 | 653 } // namespace v8 |