Chromium Code Reviews

Unified Diff: src/compiler/greedy-allocator.h

Issue 1205173002: [turbofan] Greedy allocator refactoring. (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Created 5 years, 6 months ago
 // Copyright 2015 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
 #ifndef V8_GREEDY_ALLOCATOR_H_
 #define V8_GREEDY_ALLOCATOR_H_
 
+#include "src/compiler/coalesced-live-ranges.h"
 #include "src/compiler/register-allocator.h"
 #include "src/zone-containers.h"
 
 namespace v8 {
 namespace internal {
 namespace compiler {
 
-class CoalescedLiveRanges;
+
+// The object of allocation scheduling. At minimum, this is a LiveRange, but
+// we may extend this to groups of LiveRanges. It has to be comparable.
+// TODO(mtrofin): size calculation may happen redundantly for a range, consider
+// making size a member of LiveRange.
+struct AllocationCandidate {
+  explicit AllocationCandidate(LiveRange* range);
+  bool operator<(const AllocationCandidate& other) const {
+    return size_ < other.size_;
+  }
Benedikt Meurer 2015/06/25 04:41:06  Nit: Please add an appropriate operator> if you define operator<.
Mircea Trofin 2015/06/25 23:06:22 Done.
+  LiveRange* live_range() const { return range_; }
+  unsigned size() const { return size_; }
+
+ private:
+  LiveRange* range_;
+  unsigned size_;
+};
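For context, a minimal sketch of the comparator Benedikt asks for above; this is not the code that landed, and later patch sets may shape it differently:

    // Mirror of operator<, ordering candidates by size_ in the other direction.
    bool operator>(const AllocationCandidate& other) const {
      return size_ > other.size_;
    }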
+
+
+// Schedule processing (allocating) of AllocationCandidates.
+class AllocationScheduler final : ZoneObject {
+ public:
+  explicit AllocationScheduler(Zone* zone) : storage_(zone) {}
+  void Schedule(LiveRange* range) { storage_.push(AllocationCandidate(range)); }
+  AllocationCandidate GetNext();
+  bool empty() const { return storage_.empty(); }
+
+ private:
+  typedef ZonePriorityQueue<AllocationCandidate> ScheduleStorage;
+  ScheduleStorage storage_;
+
+  DISALLOW_COPY_AND_ASSIGN(AllocationScheduler);
+};
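GetNext() is only declared here; its definition lives in greedy-allocator.cc and is not part of this diff. Assuming ZonePriorityQueue exposes the usual std::priority_queue interface (top/pop), a minimal sketch of what it presumably does:

    AllocationCandidate AllocationScheduler::GetNext() {
      DCHECK(!storage_.empty());
      // operator< orders candidates by size, so top() is the largest candidate.
      AllocationCandidate result = storage_.top();
      storage_.pop();
      return result;
    }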
 
 
 // A variant of the LLVM Greedy Register Allocator. See
 // http://blog.llvm.org/2011/09/greedy-register-allocation-in-llvm-30.html
 class GreedyAllocator final : public RegisterAllocator {
  public:
   explicit GreedyAllocator(RegisterAllocationData* data, RegisterKind kind,
                            Zone* local_zone);
 
   void AllocateRegisters();
 
  private:
-  LifetimePosition GetSplittablePos(LifetimePosition pos);
-  const RegisterConfiguration* config() const { return data()->config(); }
+  AllocationScheduler& scheduler() { return scheduler_; }
+  CoalescedLiveRanges* current_allocations(unsigned i) {
+    return allocations_[i];
+  }
   Zone* local_zone() const { return local_zone_; }
 
-  int GetHintedRegister(LiveRange* range);
+  // Insert fixed ranges.
+  void InitializeAllocations();
titzer 2015/06/24 23:05:44  Why not just name the method after what it does? […]
Mircea Trofin 2015/06/25 23:06:23  Good suggestion. I'm proposing PreallocateFixedRanges.
 
-  typedef ZonePriorityQueue<std::pair<unsigned, LiveRange*>> PQueue;
+  // Schedule unassigned live ranges for allocation.
+  // TODO(mtrofin): groups.
+  void InitializeScheduler();
titzer 2015/06/24 23:05:44 Same here.
Mircea Trofin 2015/06/25 23:06:23 Done.
 
-  unsigned GetLiveRangeSize(LiveRange* range);
-  void Enqueue(LiveRange* range);
+  // Find the optimal split for ranges defined by a memory operand, e.g.
+  // constants or function parameters passed on the stack.
+  void HandleRangesDefinedByMemoryOperand();
titzer 2015/06/24 23:05:44  "handle" is too general. Please name the method after what it does.
 
-  void Evict(LiveRange* range);
+  void ProcessCandidate(const AllocationCandidate& candidate);
titzer 2015/06/24 23:05:44 What does Process mean?
Mircea Trofin 2015/06/25 23:06:23 Done.
-  float CalculateSpillWeight(LiveRange* range);
-  float CalculateMaxSpillWeight(const ZoneSet<LiveRange*>& ranges);
+  void ProcessLiveRange(LiveRange* range, unsigned size);
 
+  bool CanProcessRange(LiveRange* range) const {
+    return range != nullptr && !range->IsEmpty() && range->kind() == mode();
+  }
 
-  bool TryAllocate(LiveRange* current, ZoneSet<LiveRange*>* conflicting);
-  bool TryAllocatePhysicalRegister(unsigned reg_id, LiveRange* range,
+  // Calculate the weight of a candidate for allocation.
+  float GetCandidateRangeWeight(LiveRange* range, unsigned size);
titzer 2015/06/24 23:05:44  Even though this is camel case (and thus CouldPerf[…]
Mircea Trofin 2015/06/25 23:06:23  Done by virtue of having had changed a bit more on[…]
-                                   ZoneSet<LiveRange*>* conflicting);
+
+  // Calculate the new weight of a range that is about to be allocated.
+  float GetAllocatedRangeWeight(float candidate_weight);
+
+  // TODO(mtrofin): HandleSpillOperands feel like belonging to
+  // a preceding phase, rather than here.
   bool HandleSpillOperands(LiveRange* range);
titzer 2015/06/24 23:05:44 Again, no idea what "Handle" means here.
Mircea Trofin 2015/06/25 23:06:23 Done.
-  void AllocateBlockedRange(LiveRange* current, LifetimePosition pos,
-                            bool spill);
 
-  LiveRange* SpillBetweenUntil(LiveRange* range, LifetimePosition start,
-                               LifetimePosition until, LifetimePosition end);
-  void AssignRangeToRegister(int reg_id, LiveRange* range);
+  // This is the extension point for splitting heuristics.
+  void HandleBlockedRange(LiveRange* range);
 
-  LifetimePosition FindProgressingSplitPosition(LiveRange* range,
-                                                bool* is_spill_pos);
+  // Necessary heuristic: spill when all else failed.
+  void SpillRangeAsLastResort(LiveRange* range);
+
+  void AssignRangeToRegister(int reg_id, LiveRange* range, float weight);
 
   Zone* local_zone_;
   ZoneVector<CoalescedLiveRanges*> allocations_;
-  PQueue queue_;
+  AllocationScheduler scheduler_;
   DISALLOW_COPY_AND_ASSIGN(GreedyAllocator);
 };
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
 #endif  // V8_GREEDY_ALLOCATOR_H_
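For orientation, a rough sketch of how the declared methods appear to fit together, inferred from this header alone; the actual control flow is in greedy-allocator.cc (not shown in this patch) and may differ:

    void GreedyAllocator::AllocateRegisters() {
      InitializeAllocations();               // pre-place the fixed ranges
      HandleRangesDefinedByMemoryOperand();  // split ranges backed by memory operands
      InitializeScheduler();                 // enqueue the unassigned live ranges
      while (!scheduler().empty()) {
        AllocationCandidate candidate = scheduler().GetNext();
        // ProcessCandidate() tries to find a register for the candidate; when
        // everything conflicts, HandleBlockedRange() applies the splitting
        // heuristics, and SpillRangeAsLastResort() spills if nothing else helps.
        ProcessCandidate(candidate);
      }
    }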