Chromium Code Reviews

Side by Side Diff: src/interpreter/bytecode-register-optimizer.cc

Issue 1997653002: [interpreter] Bytecode register optimizer. (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Try harder with source positions. Created 4 years, 7 months ago
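The optimizer is a new bytecode pipeline stage that intercepts Ldar, Star and Mov, recording register equivalences instead of emitting the transfers straight away. Each elided bytecode leaves a Nop behind that carries its source position, and a pending transfer is only materialized when the value is actually needed: when a register is read or about to be clobbered, before jumps, Return and Debugger, and whenever the target register is observable (a local or parameter). A rough sketch of the intended effect, using hypothetical register numbers and a bytecode sequence not taken from this CL:

    generated by the builder:   Ldar r0 ; Star r1 ; Add r1
    emitted by this stage:      Nop ; Nop ; Ldar r0 ; Add r0

The temporary r1 only ever aliases r0, so the Star is never materialized; the Add's register operand is rewritten to the materialized equivalent r0, and the two Nops preserve the source positions of the elided Ldar and Star.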
1 // Copyright 2015 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "src/interpreter/bytecode-register-optimizer.h"
6
7 namespace v8 {
8 namespace internal {
9 namespace interpreter {
10
11 class BytecodeRegisterOptimizer::RegisterInfo final : public ZoneObject {
12 public:
13 RegisterInfo(Register reg, int equivalence_id, bool materialized)
14 : register_(reg),
15 equivalence_id_(equivalence_id),
16 materialized_(materialized),
17 next_(this),
18 prev_(this) {}
19
20 void AddToEquivalenceSetOf(RegisterInfo* info);
21 void RemoveFromEquivalenceSet();
22 bool IsOnlyEquivalent() const;
23 bool IsOnlyMaterializedEquivalent() const;
24 bool IsEquivalent(RegisterInfo* info) const;
25
26 RegisterInfo* GetMaterializedEquivalent();
27 RegisterInfo* GetMaterializedEquivalentOtherThan(Register reg);
28 RegisterInfo* GetMaterializationCandidate();
29
30 Register register_value() const { return register_; }
31 bool materialized() const { return materialized_; }
32 void set_materialized(bool materialized) { materialized_ = materialized; }
33 void set_equivalence_id(int equivalence_id) {
34 equivalence_id_ = equivalence_id;
35 }
36 int equivalence_id() const { return equivalence_id_; }
37 RegisterInfo* next() const { return next_; }
38
39 private:
40 Register register_;
41 int equivalence_id_;
42 bool materialized_;
43
44 // Equivalence set pointers.
45 RegisterInfo* next_;
46 RegisterInfo* prev_;
47
48 DISALLOW_COPY_AND_ASSIGN(RegisterInfo);
49 };
50
51 void BytecodeRegisterOptimizer::RegisterInfo::AddToEquivalenceSetOf(
52 RegisterInfo* info) {
53 DCHECK(!IsEquivalent(info) && IsOnlyEquivalent());
54 this->next_ = info->next_;
55 this->prev_ = info;
56 this->prev_->next_ = this;
57 this->next_->prev_ = this;
58 set_equivalence_id(info->equivalence_id());
59 }
60
61 void BytecodeRegisterOptimizer::RegisterInfo::RemoveFromEquivalenceSet() {
62 this->next_->prev_ = this->prev_;
63 this->prev_->next_ = this->next_;
64 this->next_ = this->prev_ = this;
65 }
66
67 bool BytecodeRegisterOptimizer::RegisterInfo::IsOnlyEquivalent() const {
68 return this->next_ == this;
69 }
70
71 bool BytecodeRegisterOptimizer::RegisterInfo::IsOnlyMaterializedEquivalent()
72 const {
73 DCHECK(materialized());
74
75 const RegisterInfo* visitor = this->next_;
76 while (visitor != this) {
77 if (visitor->materialized()) {
78 return false;
79 }
80 visitor = visitor->next_;
81 }
82 return true;
83 }
84
85 bool BytecodeRegisterOptimizer::RegisterInfo::IsEquivalent(
86 RegisterInfo* info) const {
87 return equivalence_id() == info->equivalence_id();
88 }
89
90 BytecodeRegisterOptimizer::RegisterInfo*
91 BytecodeRegisterOptimizer::RegisterInfo::GetMaterializedEquivalent() {
92 RegisterInfo* visitor = this;
93 do {
94 if (visitor->materialized()) {
95 return visitor;
96 }
97 visitor = visitor->next_;
98 } while (visitor != this);
99
100 return nullptr;
101 }
102
103 BytecodeRegisterOptimizer::RegisterInfo*
104 BytecodeRegisterOptimizer::RegisterInfo::GetMaterializationCandidate() {
105 DCHECK(this->materialized());
106 RegisterInfo* visitor = this->next_;
108 RegisterInfo* best_info = nullptr;
109 while (visitor != this) {
110 if (visitor->materialized()) {
111 return nullptr;
112 }
113 if (best_info == nullptr ||
114 visitor->register_value() < best_info->register_value()) {
115 best_info = visitor;
116 }
117 visitor = visitor->next_;
118 }
119 return best_info;
120 }
121
122 BytecodeRegisterOptimizer::BytecodeRegisterOptimizer(
123 Zone* zone, TemporaryRegisterAllocator* register_allocator,
124 int parameter_count, int fixed_register_count,
125 BytecodePipelineStage* next_stage)
126 : accumulator_(Register::bytecode_array()),
127 temporary_base_(register_allocator->allocation_base()),
128 register_map_(zone),
129 equivalence_id_(0),
130 next_stage_(next_stage),
131 flushed_(false),
132 zone_(zone) {
133 register_allocator->set_observer(this);
134
135 // Calculate offset so register index values can be mapped into
136 // a vector of register metadata.
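// Parameter registers (and the fixed-frame register standing in for the
// accumulator) have negative indices, so negating the lowest index in use
// gives an offset that maps that register to element 0 of the map.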
137 if (parameter_count != 0) {
138 map_index_offset_ =
139 -Register::FromParameterIndex(0, parameter_count).index();
140 } else {
141 map_index_offset_ = -accumulator_.index();
142 }
143
144 register_map_.resize(map_index_offset_ +
145 static_cast<size_t>(fixed_register_count));
146
147 for (size_t i = 0; i < register_map_.size(); ++i) {
148 register_map_[i] = new (zone)
149 RegisterInfo(RegisterFromMapIndex(i), equivalence_id_++, true);
150 DCHECK_EQ(register_map_[i]->register_value().index(),
151 RegisterFromMapIndex(i).index());
152 }
153 accumulator_info_ = GetRegisterInfo(accumulator_);
154 DCHECK(accumulator_info_->register_value() == accumulator_ &&
155 accumulator_info_->materialized());
156 }
157
158 // override
159 void BytecodeRegisterOptimizer::FlushBasicBlock() {
160 next_stage_->FlushBasicBlock();
161 }
162
163 void BytecodeRegisterOptimizer::FlushState() {
164 size_t count = register_map_.size();
165 for (size_t i = 0; i < count; ++i) {
166 RegisterInfo* reg_info = register_map_[i];
167 if (!reg_info->materialized() && !reg_info->IsOnlyEquivalent()) {
168 DCHECK(RegisterIsTemporary(reg_info->register_value()) ||
169 reg_info->register_value() == accumulator_);
170 Materialize(reg_info);
171 }
172 }
173
174 for (size_t i = 0; i < count; ++i) {
175 RegisterInfo* reg_info = register_map_[i];
176 if (!reg_info->IsOnlyEquivalent()) {
177 reg_info->RemoveFromEquivalenceSet();
178 reg_info->set_equivalence_id(equivalence_id_++);
179 }
180 }
181 }
182
183 // override
184 size_t BytecodeRegisterOptimizer::FlushForOffset() {
185 if (!flushed_) {
186 FlushState();
187 }
188 return next_stage_->FlushForOffset();
189 }
190
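// Emits a Nop carrying the source position of a bytecode that is about to be
// elided (Ldar, Star, Mov), so the position information survives even though
// no transfer bytecode is written to the next stage.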
191 void BytecodeRegisterOptimizer::EmitNop(BytecodeNode* node) {
192 BytecodeNode nop(Bytecode::kNop);
193 nop.source_info().Update(node->source_info());
194 WriteToNextStage(&nop);
195 node->source_info().set_invalid();
196 }
197
198 // override
199 void BytecodeRegisterOptimizer::Write(BytecodeNode* node) {
200 flushed_ = false;
201
202 switch (node->bytecode()) {
203 case Bytecode::kLdar: {
204 DoLdar(node);
205 EmitNop(node);
206 return;
207 }
208 case Bytecode::kStar: {
209 EmitNop(node);
210 DoStar(node);
211 return;
212 }
213 case Bytecode::kMov: {
214 EmitNop(node);
215 DoMov(node);
216 return;
217 }
218 case Bytecode::kReturn: {
219 // The accumulator holds the return value, so it needs to be materialized.
220 if (accumulator_info_->materialized() &&
221 !accumulator_info_->IsOnlyMaterializedEquivalent()) {
222 accumulator_info_->set_materialized(false);
223 }
224 Materialize(accumulator_info_);
225 WriteToNextStage(node);
226 return;
227 }
228 default:
229 break;
230 }
231
232 if (Bytecodes::IsJump(node->bytecode()) ||
233 node->bytecode() == Bytecode::kDebugger) {
234 // The debugger can manipulate locals and parameters, so flush
235 // everything before handing over to it. Similarly, all state
236 // must be flushed before emitting a jump.
237 FlushState();
238 WriteToNextStage(node);
239 return;
240 }
241
242 PrepareOperands(node);
243 WriteToNextStage(node);
244 }
245
246 void BytecodeRegisterOptimizer::WriteToNextStage(BytecodeNode* node) {
247 next_stage_->Write(node);
248 }
249
250 void BytecodeRegisterOptimizer::OutputRegisterTransfer(
251 RegisterInfo* input_info, RegisterInfo* output_info) {
252 Register input = input_info->register_value();
253 Register output = output_info->register_value();
254 DCHECK_NE(input.index(), output.index());
255
256 if (input == accumulator_) {
257 uint32_t operand = static_cast<uint32_t>(output.ToOperand());
258 OperandScale scale = Bytecodes::OperandSizesToScale(output.SizeOfOperand());
259 BytecodeNode node(Bytecode::kStar, operand, scale);
260 WriteToNextStage(&node);
261 } else if (output == accumulator_) {
262 uint32_t operand = static_cast<uint32_t>(input.ToOperand());
263 OperandScale scale = Bytecodes::OperandSizesToScale(input.SizeOfOperand());
264 BytecodeNode node(Bytecode::kLdar, operand, scale);
265 WriteToNextStage(&node);
266 } else {
267 uint32_t operand0 = static_cast<uint32_t>(input.ToOperand());
268 uint32_t operand1 = static_cast<uint32_t>(output.ToOperand());
269 OperandScale scale = Bytecodes::OperandSizesToScale(input.SizeOfOperand(),
270 output.SizeOfOperand());
271 BytecodeNode node(Bytecode::kMov, operand0, operand1, scale);
272 WriteToNextStage(&node);
273 }
274 }
275
276 void BytecodeRegisterOptimizer::CreateMaterializedEquivalentIfRequired(
277 RegisterInfo* info) {
278 if (info->materialized()) {
279 RegisterInfo* unmaterialized = info->GetMaterializationCandidate();
280 if (unmaterialized) {
281 OutputRegisterTransfer(info, unmaterialized);
282 unmaterialized->set_materialized(true);
283 }
284 }
285 }
286
287 BytecodeRegisterOptimizer::RegisterInfo*
288 BytecodeRegisterOptimizer::GetMaterializedEquivalent(RegisterInfo* info) {
289 return info->materialized() ? info : info->GetMaterializedEquivalent();
290 }
291
292 BytecodeRegisterOptimizer::RegisterInfo*
293 BytecodeRegisterOptimizer::GetMaterializedEquivalentNotAccumulator(
294 RegisterInfo* info) {
295 if (info->materialized()) {
296 return info;
297 }
298
299 RegisterInfo* result = info->GetMaterializedEquivalent();
300 if (result->register_value() == accumulator_) {
301 result = result->next()->GetMaterializedEquivalent();
302 if (result->register_value() == accumulator_) {
303 Materialize(info);
304 result = info;
305 }
306 }
307 return result;
308 }
309
310 void BytecodeRegisterOptimizer::Materialize(RegisterInfo* info) {
311 if (!info->materialized()) {
312 RegisterInfo* materialized = info->GetMaterializedEquivalent();
313 OutputRegisterTransfer(materialized, info);
314 info->set_materialized(true);
315 }
316 }
317
318 void BytecodeRegisterOptimizer::RegisterTransfer(RegisterInfo* input_info,
319 RegisterInfo* output_info) {
320 // Ensure the value associated with the output is materialized into a
321 // different register if necessary.
322 CreateMaterializedEquivalentIfRequired(output_info);
323 bool output_is_observable =
324 RegisterIsObservable(output_info->register_value());
325
326 // If the output and input are not equivalent or the output is
327 // visible to the user/debugger, update equivalence set and
328 // potentially emit store.
329 if (!output_info->IsEquivalent(input_info) || output_is_observable) {
330 // Remove from current set and invalidate equivalence id.
331 output_info->RemoveFromEquivalenceSet();
332 output_info->set_equivalence_id(equivalence_id_++);
333
334 // Place in new equivalence set.
335 RegisterInfo* materialized_info = input_info->GetMaterializedEquivalent();
336 output_info->AddToEquivalenceSetOf(materialized_info);
337 output_info->set_materialized(output_is_observable);
338 if (output_is_observable) {
339 OutputRegisterTransfer(materialized_info, output_info);
340 }
341 }
342 }
343
344 void BytecodeRegisterOptimizer::DoLdar(const BytecodeNode* const node) {
345 Register input = GetRegisterInputOperand(
346 0, node->bytecode(), node->operands(), node->operand_count());
347 RegisterInfo* input_info = GetRegisterInfo(input);
348 RegisterTransfer(input_info, accumulator_info_);
349 }
350
351 void BytecodeRegisterOptimizer::DoMov(const BytecodeNode* const node) {
352 Register input = GetRegisterInputOperand(
353 0, node->bytecode(), node->operands(), node->operand_count());
354 RegisterInfo* input_info = GetRegisterInfo(input);
355 Register output = GetRegisterOutputOperand(
356 1, node->bytecode(), node->operands(), node->operand_count());
357 RegisterInfo* output_info = GetOrCreateRegisterInfo(output);
358 RegisterTransfer(input_info, output_info);
359 }
360
361 void BytecodeRegisterOptimizer::DoStar(const BytecodeNode* const node) {
362 Register output = GetRegisterOutputOperand(
363 0, node->bytecode(), node->operands(), node->operand_count());
364 RegisterInfo* output_info = GetOrCreateRegisterInfo(output);
365 RegisterTransfer(accumulator_info_, output_info);
366 }
367
368 void BytecodeRegisterOptimizer::PrepareForClobber(RegisterInfo* reg_info) {
369 CreateMaterializedEquivalentIfRequired(reg_info);
370
371 // Put the register in its own equivalence set.
372 reg_info->RemoveFromEquivalenceSet();
373 reg_info->set_materialized(true);
374 reg_info->set_equivalence_id(equivalence_id_++);
375 }
376
377 void BytecodeRegisterOptimizer::PrepareForClobberRange(Register start,
378 int count) {
379 for (int i = 0; i < count; ++i) {
380 Register reg(start.index() + i);
381 RegisterInfo* reg_info = GetOrCreateRegisterInfo(reg);
382 PrepareForClobber(reg_info);
383 }
384 }
385
386 Register BytecodeRegisterOptimizer::PrepareRegisterInputOperand(Register reg) {
387 RegisterInfo* reg_info = GetOrCreateRegisterInfo(reg);
388 if (reg_info->materialized()) {
389 return reg;
390 } else {
391 RegisterInfo* equivalent_info =
392 GetMaterializedEquivalentNotAccumulator(reg_info);
393 return equivalent_info->register_value();
394 }
395 }
396
397 void BytecodeRegisterOptimizer::PrepareRegisterRangeInputOperand(Register start,
398 int count) {
399 for (int i = 0; i < count; ++i) {
400 Register current(start.index() + i);
401 RegisterInfo* input_info = GetRegisterInfo(current);
402 Materialize(input_info);
403 }
404 }
405
406 void BytecodeRegisterOptimizer::PrepareOperands(BytecodeNode* node) {
407 if (Bytecodes::ReadsAccumulator(node->bytecode()) &&
408 !accumulator_info_->materialized()) {
409 Materialize(accumulator_info_);
410 }
411
412 if (Bytecodes::WritesAccumulator(node->bytecode())) {
413 PrepareForClobber(accumulator_info_);
414 }
415
416 // For each input operand, substitute a materialized equivalent if it is a
417 // single register, or materialize it in place if it is part of a register
418 // list longer than one. Update operand_scale if necessary.
419 //
420 // For each output register about to be clobbered, materialize an
421 // equivalent if one exists. Put each register in its own equivalence set.
422 int register_operand_bitmap =
423 Bytecodes::GetRegisterOperandBitmap(node->bytecode());
424 const OperandType* operand_types =
425 Bytecodes::GetOperandTypes(node->bytecode());
426 uint32_t* operands = node->operands();
427 for (int i = 0; register_operand_bitmap != 0;
428 ++i, register_operand_bitmap >>= 1) {
429 if ((register_operand_bitmap & 1) == 0) {
430 continue;
431 }
432 OperandType operand_type = operand_types[i];
433 int count = 0;
434 if (operand_types[i + 1] == OperandType::kRegCount) {
435 count = static_cast<int>(operands[i + 1]);
436 if (count == 0) {
437 continue;
438 }
439 } else {
440 count = Bytecodes::GetNumberOfRegistersRepresentedBy(operand_type);
441 }
442
443 Register reg = Register::FromOperand(static_cast<int32_t>(operands[i]));
444 if (Bytecodes::IsRegisterInputOperandType(operand_type)) {
445 if (count == 1) {
446 Register equivalent = PrepareRegisterInputOperand(reg);
447 operands[i] = static_cast<uint32_t>(equivalent.ToOperand());
448 // Update operand scale as equivalent may be different.
449 OperandScale operand_scale =
450 Bytecodes::OperandSizesToScale(equivalent.SizeOfOperand());
451 if (operand_scale > node->operand_scale()) {
452 node->set_operand_scale(operand_scale);
453 }
454 } else if (count > 1) {
455 PrepareRegisterRangeInputOperand(reg, count);
456 }
457 } else if (Bytecodes::IsRegisterOutputOperandType(operand_type)) {
458 PrepareForClobberRange(reg, count);
459 }
460 }
461 }
462
463 // static
464 Register BytecodeRegisterOptimizer::OperandToRegister(uint32_t operand) {
465 return Register::FromOperand(static_cast<int32_t>(operand));
466 }
467
468 // static
469 Register BytecodeRegisterOptimizer::GetRegisterInputOperand(
470 int index, Bytecode bytecode, const uint32_t* operands, int operand_count) {
471 DCHECK_LT(index, operand_count);
472 DCHECK(Bytecodes::IsRegisterInputOperandType(
473 Bytecodes::GetOperandType(bytecode, index)));
474 return OperandToRegister(operands[index]);
475 }
476
477 // static
478 Register BytecodeRegisterOptimizer::GetRegisterOutputOperand(
479 int index, Bytecode bytecode, const uint32_t* operands, int operand_count) {
480 DCHECK_LT(index, operand_count);
481 DCHECK(Bytecodes::IsRegisterOutputOperandType(
482 Bytecodes::GetOperandType(bytecode, index)));
483 return OperandToRegister(operands[index]);
484 }
485
486 BytecodeRegisterOptimizer::RegisterInfo*
487 BytecodeRegisterOptimizer::GetRegisterInfo(Register reg) {
488 size_t index = GetMapIndex(reg);
489 return (index < register_map_.size()) ? register_map_[index] : nullptr;
490 }
491
492 BytecodeRegisterOptimizer::RegisterInfo*
493 BytecodeRegisterOptimizer::GetOrCreateRegisterInfo(Register reg) {
494 size_t index = GetMapIndex(reg);
495 return index < register_map_.size() ? register_map_[index]
496 : NewRegisterInfo(reg);
497 }
498
499 BytecodeRegisterOptimizer::RegisterInfo*
500 BytecodeRegisterOptimizer::NewRegisterInfo(Register reg) {
501 size_t index = GetMapIndex(reg);
502 DCHECK_GE(index, register_map_.size());
503 GrowRegisterMap(reg);
504 return register_map_[index];
505 }
506
507 void BytecodeRegisterOptimizer::GrowRegisterMap(Register reg) {
508 DCHECK(RegisterIsTemporary(reg));
509 size_t index = GetMapIndex(reg);
510 DCHECK_GE(index, register_map_.size());
511 size_t new_size = index + 1;
512 size_t old_size = register_map_.size();
513 register_map_.resize(new_size);
514 for (size_t i = old_size; i < new_size; ++i) {
515 register_map_[i] = new (zone())
516 RegisterInfo(RegisterFromMapIndex(i), equivalence_id_++, false);
517 }
518 }
519
520 void BytecodeRegisterOptimizer::TemporaryRegisterFreeEvent(Register reg) {
521 RegisterInfo* info = GetRegisterInfo(reg);
522
523 if (info != nullptr) {
524 // If the register is materialized and part of an equivalence set,
525 // make sure another member of the set holds the value before the
526 // temporary register is culled.
527 CreateMaterializedEquivalentIfRequired(info);
528 info->RemoveFromEquivalenceSet();
529 info->set_materialized(false);
530 info->set_equivalence_id(-1);
531 }
532 }
533
534 } // namespace interpreter
535 } // namespace internal
536 } // namespace v8
