// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/live-range-separator.h"
#include "src/compiler/register-allocator.h"

namespace v8 {
namespace internal {
namespace compiler {


#define TRACE(...)                             \
  do {                                         \
    if (FLAG_trace_alloc) PrintF(__VA_ARGS__); \
  } while (false)


namespace {

// Starting from a deferred block, find the last consecutive deferred block.
RpoNumber GetLastDeferredBlock(const InstructionBlock *block,
                               const InstructionSequence *code) {
  DCHECK(block->IsDeferred());
  RpoNumber first = block->rpo_number();

  RpoNumber last = first;
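  // Walk forward in RPO order; the sequence ends at the first block that is
  // not deferred.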
  for (int i = first.ToInt(); i < code->InstructionBlockCount(); ++i) {
    RpoNumber at_i = RpoNumber::FromInt(i);
    const InstructionBlock *block_at_i = code->InstructionBlockAt(at_i);
    if (!block_at_i->IsDeferred()) break;
    last = at_i;
  }

  return last;
}


// Delimits consecutive deferred block sequences: marks the first block of
// each sequence with the RPO number of the sequence's last block.
void AssociateDeferredBlockSequences(InstructionSequence *code) {
  for (int blk_id = 0; blk_id < code->InstructionBlockCount(); ++blk_id) {
    InstructionBlock *block =
        code->InstructionBlockAt(RpoNumber::FromInt(blk_id));
    if (!block->IsDeferred()) continue;
    RpoNumber last = GetLastDeferredBlock(block, code);
    block->set_last_deferred(last);
    // We know last is still deferred, and that last + 1 is not (or is an
    // invalid index). So skip over last + 1 and continue from last + 2. This
    // way, we visit each block exactly once, and the total complexity of this
    // function is O(n), n being the number of blocks.
    blk_id = last.ToInt() + 1;
  }
}


// If the live range has a liveness hole right between start and end,
// we don't need to splinter it.
bool IsIntervalAlreadyExcluded(const LiveRange *range, LifetimePosition start,
                               LifetimePosition end) {
  for (UseInterval *interval = range->first_interval(); interval != nullptr;
       interval = interval->next()) {
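    // If either start or end lies inside a use interval, the range is live
    // there and still needs splintering.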
    if (interval->start() <= start && start < interval->end()) return false;
    if (interval->start() < end && end <= interval->end()) return false;
  }
  return true;
}


void CreateSplinter(LiveRange *range, RegisterAllocationData *data,
                    LifetimePosition first_cut, LifetimePosition last_cut) {
  DCHECK(!range->IsChild());
  DCHECK(!range->IsSplinter());
  // We can ignore ranges that live solely in deferred blocks.
  // If a range ends right at the end of a deferred block, it is marked by
  // the range builder as ending at the gap start of the next block - since
  // the end is a position where the variable isn't live. We need to take
  // that into consideration.
  LifetimePosition max_allowed_end = last_cut.NextFullStart();

  if (first_cut <= range->Start() && max_allowed_end >= range->End()) {
    return;
  }

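  // Clamp the splinter to the portion of the range that overlaps the
  // deferred blocks.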
  LifetimePosition start = Max(first_cut, range->Start());
  LifetimePosition end = Min(last_cut, range->End());
  // Skip ranges that have a hole where the deferred block(s) are.
  if (IsIntervalAlreadyExcluded(range, start, end)) return;

  if (start < end) {
    // Ensure the original range has a spill range associated before it gets
    // splintered; splinters will point to it. This way, when attempting to
    // reuse the spill slots of splinters during allocation, we avoid
    // clobbering such slots.
    if (range->MayRequireSpillRange()) {
      data->CreateSpillRangeForLiveRange(range);
    }
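    // Carve [start, end) out of the original range into a new child range
    // acting as the splinter.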
    LiveRange *result = data->NewChildRangeFor(range);
    Zone *zone = data->allocation_zone();
    range->Splinter(start, end, result, zone);
  }
}


// Splinter all ranges live inside successive deferred blocks.
// No control flow analysis is performed. After register allocation, we
// merge the splinters back into the original ranges, and then rely on the
// range connector to properly connect them.
void SplinterRangesInDeferredBlocks(RegisterAllocationData *data) {
  InstructionSequence *code = data->code();
  int code_block_count = code->InstructionBlockCount();
  Zone *zone = data->allocation_zone();
  ZoneVector<BitVector *> &in_sets = data->live_in_sets();

  for (int i = 0; i < code_block_count; ++i) {
    InstructionBlock *block = code->InstructionBlockAt(RpoNumber::FromInt(i));
    if (!block->IsDeferred()) continue;

    RpoNumber last_deferred = block->last_deferred();
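    // Advance the outer loop to the last block of the sequence; ++i then
    // resumes the scan right after it.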
    i = last_deferred.ToInt();

    LifetimePosition first_cut = LifetimePosition::GapFromInstructionIndex(
        block->first_instruction_index());

    const BitVector *in_set = in_sets[i];
    InstructionBlock *last = code->InstructionBlockAt(last_deferred);
    const BitVector *out_set = LiveRangeBuilder::ComputeLiveOut(last, data);
    LifetimePosition last_cut = LifetimePosition::GapFromInstructionIndex(
        last->last_instruction_index());

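    // Splinter every range that is live into or out of the last deferred
    // block of the sequence.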
    BitVector ranges_to_splinter(*in_set, zone);
    ranges_to_splinter.Union(*out_set);
    BitVector::Iterator iterator(&ranges_to_splinter);

    while (!iterator.Done()) {
      int range_id = iterator.Current();
      iterator.Advance();

      LiveRange *range = data->live_ranges()[range_id];
      CreateSplinter(range, data, first_cut, last_cut);
    }
  }
}
}  // namespace


void LiveRangeSeparator::Splinter() {
  AssociateDeferredBlockSequences(data()->code());
  SplinterRangesInDeferredBlocks(data());
}


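// Merge each top-level splinter back into the range it was splintered from.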
void LiveRangeMerger::Merge() {
  int live_range_count = static_cast<int>(data()->live_ranges().size());
  for (int i = 0; i < live_range_count; ++i) {
    LiveRange *range = data()->live_ranges()[i];
    if (range == nullptr || range->IsEmpty() || range->IsChild() ||
        !range->IsSplinter()) {
      continue;
    }
    LiveRange *splinter_parent = range->splintered_from();

    splinter_parent->Merge(range, data());
  }
}


}  // namespace compiler
}  // namespace internal
}  // namespace v8