OLD | NEW |
| (Empty) |
1 // Copyright 2015 the V8 project authors. All rights reserved. | |
2 // Use of this source code is governed by a BSD-style license that can be | |
3 // found in the LICENSE file. | |
4 | |
5 #include "src/interpreter/bytecode-register-allocator.h" | |
6 | |
7 #include "src/interpreter/bytecode-array-builder.h" | |
8 | |
9 namespace v8 { | |
10 namespace internal { | |
11 namespace interpreter { | |
12 | |
13 TemporaryRegisterAllocator::TemporaryRegisterAllocator(Zone* zone, | |
14 int allocation_base) | |
15 : free_temporaries_(zone), | |
16 allocation_base_(allocation_base), | |
17 allocation_count_(0), | |
18 observer_(nullptr) {} | |
19 | |
20 Register TemporaryRegisterAllocator::first_temporary_register() const { | |
21 DCHECK(allocation_count() > 0); | |
22 return Register(allocation_base()); | |
23 } | |
24 | |
25 Register TemporaryRegisterAllocator::last_temporary_register() const { | |
26 DCHECK(allocation_count() > 0); | |
27 return Register(allocation_base() + allocation_count() - 1); | |
28 } | |
29 | |
30 void TemporaryRegisterAllocator::set_observer( | |
31 TemporaryRegisterObserver* observer) { | |
32 DCHECK(observer_ == nullptr); | |
33 observer_ = observer; | |
34 } | |
35 | |
36 int TemporaryRegisterAllocator::AllocateTemporaryRegister() { | |
37 allocation_count_ += 1; | |
38 return allocation_base() + allocation_count() - 1; | |
39 } | |
40 | |
41 int TemporaryRegisterAllocator::BorrowTemporaryRegister() { | |
42 if (free_temporaries_.empty()) { | |
43 return AllocateTemporaryRegister(); | |
44 } else { | |
45 auto pos = free_temporaries_.begin(); | |
46 int retval = *pos; | |
47 free_temporaries_.erase(pos); | |
48 return retval; | |
49 } | |
50 } | |
51 | |
// Hands out a temporary register whose index lies outside the inclusive
// range [start_index, end_index]. Used while a consecutive run of
// registers is reserved so that unrelated borrows do not land inside it.
int TemporaryRegisterAllocator::BorrowTemporaryRegisterNotInRange(
    int start_index, int end_index) {
  if (free_temporaries_.empty()) {
    // No free temporaries. Allocate fresh registers, parking any that
    // fall inside the excluded range on the free list, until the next
    // allocation is guaranteed to be outside the range.
    int next_allocation = allocation_base() + allocation_count();
    while (next_allocation >= start_index && next_allocation <= end_index) {
      free_temporaries_.insert(AllocateTemporaryRegister());
      next_allocation += 1;
    }
    return AllocateTemporaryRegister();
  }

  // First free register with index >= start_index.
  ZoneSet<int>::iterator index = free_temporaries_.lower_bound(start_index);
  if (index == free_temporaries_.begin()) {
    // If start_index is the first free register, check for a register
    // greater than end_index.
    index = free_temporaries_.upper_bound(end_index);
    if (index == free_temporaries_.end()) {
      // Every free register lies inside the excluded range; allocate anew.
      return AllocateTemporaryRegister();
    }
  } else {
    // If there is a free register < start_index
    index--;
  }

  int retval = *index;
  free_temporaries_.erase(index);
  return retval;
}
80 | |
81 int TemporaryRegisterAllocator::PrepareForConsecutiveTemporaryRegisters( | |
82 size_t count) { | |
83 if (count == 0) { | |
84 return -1; | |
85 } | |
86 | |
87 // TODO(oth): replace use of set<> here for free_temporaries with a | |
88 // more efficient structure. And/or partition into two searches - | |
89 // one before the translation window and one after. | |
90 | |
91 // A run will require at least |count| free temporaries. | |
92 while (free_temporaries_.size() < count) { | |
93 free_temporaries_.insert(AllocateTemporaryRegister()); | |
94 } | |
95 | |
96 // Search within existing temporaries for a run. | |
97 auto start = free_temporaries_.begin(); | |
98 size_t run_length = 0; | |
99 for (auto run_end = start; run_end != free_temporaries_.end(); run_end++) { | |
100 int expected = *start + static_cast<int>(run_length); | |
101 if (*run_end != expected) { | |
102 start = run_end; | |
103 run_length = 0; | |
104 } | |
105 if (++run_length == count) { | |
106 return *start; | |
107 } | |
108 } | |
109 | |
110 // Continue run if possible across existing last temporary. | |
111 if (allocation_count_ > 0 && (start == free_temporaries_.end() || | |
112 *start + static_cast<int>(run_length) != | |
113 last_temporary_register().index() + 1)) { | |
114 run_length = 0; | |
115 } | |
116 | |
117 // Pad temporaries if extended run would cross translation boundary. | |
118 Register reg_first(*start); | |
119 Register reg_last(*start + static_cast<int>(count) - 1); | |
120 | |
121 // Ensure enough registers for run. | |
122 while (run_length++ < count) { | |
123 free_temporaries_.insert(AllocateTemporaryRegister()); | |
124 } | |
125 | |
126 int run_start = | |
127 last_temporary_register().index() - static_cast<int>(count) + 1; | |
128 return run_start; | |
129 } | |
130 | |
131 bool TemporaryRegisterAllocator::RegisterIsLive(Register reg) const { | |
132 if (allocation_count_ > 0) { | |
133 DCHECK(reg >= first_temporary_register() && | |
134 reg <= last_temporary_register()); | |
135 return free_temporaries_.find(reg.index()) == free_temporaries_.end(); | |
136 } else { | |
137 return false; | |
138 } | |
139 } | |
140 | |
141 void TemporaryRegisterAllocator::BorrowConsecutiveTemporaryRegister( | |
142 int reg_index) { | |
143 DCHECK(free_temporaries_.find(reg_index) != free_temporaries_.end()); | |
144 free_temporaries_.erase(reg_index); | |
145 } | |
146 | |
147 void TemporaryRegisterAllocator::ReturnTemporaryRegister(int reg_index) { | |
148 DCHECK(free_temporaries_.find(reg_index) == free_temporaries_.end()); | |
149 free_temporaries_.insert(reg_index); | |
150 if (observer_) { | |
151 observer_->TemporaryRegisterFreeEvent(Register(reg_index)); | |
152 } | |
153 } | |
154 | |
155 BytecodeRegisterAllocator::BytecodeRegisterAllocator( | |
156 Zone* zone, TemporaryRegisterAllocator* allocator) | |
157 : base_allocator_(allocator), | |
158 allocated_(zone), | |
159 next_consecutive_register_(-1), | |
160 next_consecutive_count_(-1) {} | |
161 | |
162 BytecodeRegisterAllocator::~BytecodeRegisterAllocator() { | |
163 for (auto i = allocated_.rbegin(); i != allocated_.rend(); i++) { | |
164 base_allocator()->ReturnTemporaryRegister(*i); | |
165 } | |
166 allocated_.clear(); | |
167 } | |
168 | |
169 Register BytecodeRegisterAllocator::NewRegister() { | |
170 int allocated = -1; | |
171 if (next_consecutive_count_ <= 0) { | |
172 allocated = base_allocator()->BorrowTemporaryRegister(); | |
173 } else { | |
174 allocated = base_allocator()->BorrowTemporaryRegisterNotInRange( | |
175 next_consecutive_register_, | |
176 next_consecutive_register_ + next_consecutive_count_ - 1); | |
177 } | |
178 allocated_.push_back(allocated); | |
179 return Register(allocated); | |
180 } | |
181 | |
182 bool BytecodeRegisterAllocator::RegisterIsAllocatedInThisScope( | |
183 Register reg) const { | |
184 for (auto i = allocated_.begin(); i != allocated_.end(); i++) { | |
185 if (*i == reg.index()) return true; | |
186 } | |
187 return false; | |
188 } | |
189 | |
190 void BytecodeRegisterAllocator::PrepareForConsecutiveAllocations(size_t count) { | |
191 if (static_cast<int>(count) > next_consecutive_count_) { | |
192 next_consecutive_register_ = | |
193 base_allocator()->PrepareForConsecutiveTemporaryRegisters(count); | |
194 next_consecutive_count_ = static_cast<int>(count); | |
195 } | |
196 } | |
197 | |
198 Register BytecodeRegisterAllocator::NextConsecutiveRegister() { | |
199 DCHECK_GE(next_consecutive_register_, 0); | |
200 DCHECK_GT(next_consecutive_count_, 0); | |
201 base_allocator()->BorrowConsecutiveTemporaryRegister( | |
202 next_consecutive_register_); | |
203 allocated_.push_back(next_consecutive_register_); | |
204 next_consecutive_count_--; | |
205 return Register(next_consecutive_register_++); | |
206 } | |
207 | |
208 } // namespace interpreter | |
209 } // namespace internal | |
210 } // namespace v8 | |
OLD | NEW |