OLD | NEW |
1 // Copyright 2016 the V8 project authors. All rights reserved. | 1 // Copyright 2016 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/interpreter/bytecode-register-optimizer.h" | 5 #include "src/interpreter/bytecode-register-optimizer.h" |
6 | 6 |
7 namespace v8 { | 7 namespace v8 { |
8 namespace internal { | 8 namespace internal { |
9 namespace interpreter { | 9 namespace interpreter { |
10 | 10 |
(...skipping 212 matching lines...)
223 for (size_t i = 0; i < register_info_table_.size(); ++i) { | 223 for (size_t i = 0; i < register_info_table_.size(); ++i) { |
224 register_info_table_[i] = new (zone) RegisterInfo( | 224 register_info_table_[i] = new (zone) RegisterInfo( |
225 RegisterFromRegisterInfoTableIndex(i), NextEquivalenceId(), true, true); | 225 RegisterFromRegisterInfoTableIndex(i), NextEquivalenceId(), true, true); |
226 DCHECK_EQ(register_info_table_[i]->register_value().index(), | 226 DCHECK_EQ(register_info_table_[i]->register_value().index(), |
227 RegisterFromRegisterInfoTableIndex(i).index()); | 227 RegisterFromRegisterInfoTableIndex(i).index()); |
228 } | 228 } |
229 accumulator_info_ = GetRegisterInfo(accumulator_); | 229 accumulator_info_ = GetRegisterInfo(accumulator_); |
230 DCHECK(accumulator_info_->register_value() == accumulator_); | 230 DCHECK(accumulator_info_->register_value() == accumulator_); |
231 } | 231 } |
232 | 232 |
233 // override | 233 void BytecodeRegisterOptimizer::Flush() { |
234 Handle<BytecodeArray> BytecodeRegisterOptimizer::ToBytecodeArray( | |
235 Isolate* isolate, int register_count, int parameter_count, | |
236 Handle<FixedArray> handler_table) { | |
237 FlushState(); | |
238 return next_stage_->ToBytecodeArray(isolate, max_register_index_ + 1, | |
239 parameter_count, handler_table); | |
240 } | |
241 | |
242 // override | |
243 void BytecodeRegisterOptimizer::Write(BytecodeNode* node) { | |
244 // Jumps are handled by WriteJump. | |
245 DCHECK(!Bytecodes::IsJump(node->bytecode())); | |
246 // | |
247 // Transfers with observable registers as the destination will be | |
248 // immediately materialized so the source position information will | |
249 // be ordered correctly. | |
250 // | |
251 // Transfers without observable destination registers will initially | |
252 // be emitted as Nop's with the source position. They may, or may | |
253 // not, be materialized by the optimizer. However, the source | |
254 // position is not lost and being attached to a Nop is fine as the | |
255 // destination register is not observable in the debugger. | |
256 // | |
257 switch (node->bytecode()) { | |
258 case Bytecode::kLdar: { | |
259 DoLdar(node); | |
260 return; | |
261 } | |
262 case Bytecode::kStar: { | |
263 DoStar(node); | |
264 return; | |
265 } | |
266 case Bytecode::kMov: { | |
267 DoMov(node); | |
268 return; | |
269 } | |
270 default: | |
271 break; | |
272 } | |
273 | |
274 if (node->bytecode() == Bytecode::kDebugger || | |
275 node->bytecode() == Bytecode::kSuspendGenerator) { | |
276 // All state must be flushed before emitting | |
277 // - a call to the debugger (as it can manipulate locals and parameters), | |
278 // - a generator suspend (as this involves saving all registers). | |
279 FlushState(); | |
280 } | |
281 | |
282 PrepareOperands(node); | |
283 next_stage_->Write(node); | |
284 } | |
285 | |
286 // override | |
287 void BytecodeRegisterOptimizer::WriteJump(BytecodeNode* node, | |
288 BytecodeLabel* label) { | |
289 FlushState(); | |
290 next_stage_->WriteJump(node, label); | |
291 } | |
292 | |
293 // override | |
294 void BytecodeRegisterOptimizer::BindLabel(BytecodeLabel* label) { | |
295 FlushState(); | |
296 next_stage_->BindLabel(label); | |
297 } | |
298 | |
299 // override | |
300 void BytecodeRegisterOptimizer::BindLabel(const BytecodeLabel& target, | |
301 BytecodeLabel* label) { | |
302 // There is no need to flush here, it will have been flushed when |target| | |
303 // was bound. | |
304 next_stage_->BindLabel(target, label); | |
305 } | |
306 | |
307 void BytecodeRegisterOptimizer::FlushState() { | |
308 if (!flush_required_) { | 234 if (!flush_required_) { |
309 return; | 235 return; |
310 } | 236 } |
311 | 237 |
312 // Materialize all live registers and break equivalences. | 238 // Materialize all live registers and break equivalences. |
313 size_t count = register_info_table_.size(); | 239 size_t count = register_info_table_.size(); |
314 for (size_t i = 0; i < count; ++i) { | 240 for (size_t i = 0; i < count; ++i) { |
315 RegisterInfo* reg_info = register_info_table_[i]; | 241 RegisterInfo* reg_info = register_info_table_[i]; |
316 if (reg_info->materialized()) { | 242 if (reg_info->materialized()) { |
317 // Walk equivalents of materialized registers, materializing | 243 // Walk equivalents of materialized registers, materializing |
318 // each equivalent register as necessary and placing in their | 244 // each equivalent register as necessary and placing in their |
319 // own equivalence set. | 245 // own equivalence set. |
320 RegisterInfo* equivalent; | 246 RegisterInfo* equivalent; |
321 while ((equivalent = reg_info->GetEquivalent()) != reg_info) { | 247 while ((equivalent = reg_info->GetEquivalent()) != reg_info) { |
322 if (equivalent->allocated() && !equivalent->materialized()) { | 248 if (equivalent->allocated() && !equivalent->materialized()) { |
323 OutputRegisterTransfer(reg_info, equivalent); | 249 OutputRegisterTransfer(reg_info, equivalent); |
324 } | 250 } |
325 equivalent->MoveToNewEquivalenceSet(NextEquivalenceId(), true); | 251 equivalent->MoveToNewEquivalenceSet(NextEquivalenceId(), true); |
326 } | 252 } |
327 } | 253 } |
328 } | 254 } |
329 | 255 |
330 flush_required_ = false; | 256 flush_required_ = false; |
331 } | 257 } |
332 | 258 |
333 void BytecodeRegisterOptimizer::OutputRegisterTransfer( | 259 void BytecodeRegisterOptimizer::OutputRegisterTransfer( |
334 RegisterInfo* input_info, RegisterInfo* output_info, | 260 RegisterInfo* input_info, RegisterInfo* output_info, |
335 BytecodeSourceInfo* source_info) { | 261 BytecodeSourceInfo source_info) { |
336 Register input = input_info->register_value(); | 262 Register input = input_info->register_value(); |
337 Register output = output_info->register_value(); | 263 Register output = output_info->register_value(); |
338 DCHECK_NE(input.index(), output.index()); | 264 DCHECK_NE(input.index(), output.index()); |
339 | 265 |
340 if (input == accumulator_) { | 266 if (input == accumulator_) { |
341 uint32_t operand = static_cast<uint32_t>(output.ToOperand()); | 267 uint32_t operand = static_cast<uint32_t>(output.ToOperand()); |
342 BytecodeNode node(Bytecode::kStar, operand, source_info); | 268 BytecodeNode node(Bytecode::kStar, operand, source_info); |
343 next_stage_->Write(&node); | 269 next_stage_->Write(&node); |
344 } else if (output == accumulator_) { | 270 } else if (output == accumulator_) { |
345 uint32_t operand = static_cast<uint32_t>(input.ToOperand()); | 271 uint32_t operand = static_cast<uint32_t>(input.ToOperand()); |
(...skipping 51 matching lines...)
397 void BytecodeRegisterOptimizer::AddToEquivalenceSet( | 323 void BytecodeRegisterOptimizer::AddToEquivalenceSet( |
398 RegisterInfo* set_member, RegisterInfo* non_set_member) { | 324 RegisterInfo* set_member, RegisterInfo* non_set_member) { |
399 non_set_member->AddToEquivalenceSetOf(set_member); | 325 non_set_member->AddToEquivalenceSetOf(set_member); |
400 // Flushing is only required when two or more registers are placed | 326 // Flushing is only required when two or more registers are placed |
401 // in the same equivalence set. | 327 // in the same equivalence set. |
402 flush_required_ = true; | 328 flush_required_ = true; |
403 } | 329 } |
404 | 330 |
405 void BytecodeRegisterOptimizer::RegisterTransfer( | 331 void BytecodeRegisterOptimizer::RegisterTransfer( |
406 RegisterInfo* input_info, RegisterInfo* output_info, | 332 RegisterInfo* input_info, RegisterInfo* output_info, |
407 BytecodeSourceInfo* source_info) { | 333 BytecodeSourceInfo source_info) { |
408 // Materialize an alternate in the equivalence set that | 334 // Materialize an alternate in the equivalence set that |
409 // |output_info| is leaving. | 335 // |output_info| is leaving. |
410 if (output_info->materialized()) { | 336 if (output_info->materialized()) { |
411 CreateMaterializedEquivalent(output_info); | 337 CreateMaterializedEquivalent(output_info); |
412 } | 338 } |
413 | 339 |
414 // Add |output_info| to new equivalence set. | 340 // Add |output_info| to new equivalence set. |
415 if (!output_info->IsInSameEquivalenceSet(input_info)) { | 341 if (!output_info->IsInSameEquivalenceSet(input_info)) { |
416 AddToEquivalenceSet(input_info, output_info); | 342 AddToEquivalenceSet(input_info, output_info); |
417 } | 343 } |
418 | 344 |
419 bool output_is_observable = | 345 bool output_is_observable = |
420 RegisterIsObservable(output_info->register_value()); | 346 RegisterIsObservable(output_info->register_value()); |
421 if (output_is_observable) { | 347 if (output_is_observable) { |
422 // Force store to be emitted when register is observable. | 348 // Force store to be emitted when register is observable. |
423 output_info->set_materialized(false); | 349 output_info->set_materialized(false); |
424 RegisterInfo* materialized_info = input_info->GetMaterializedEquivalent(); | 350 RegisterInfo* materialized_info = input_info->GetMaterializedEquivalent(); |
425 OutputRegisterTransfer(materialized_info, output_info, source_info); | 351 OutputRegisterTransfer(materialized_info, output_info, source_info); |
426 } else if (source_info->is_valid()) { | 352 } else if (source_info.is_valid()) { |
427 // Emit a placeholder nop to maintain source position info. | 353 // Emit a placeholder nop to maintain source position info. |
428 EmitNopForSourceInfo(source_info); | 354 EmitNopForSourceInfo(source_info); |
429 } | 355 } |
430 | 356 |
431 bool input_is_observable = RegisterIsObservable(input_info->register_value()); | 357 bool input_is_observable = RegisterIsObservable(input_info->register_value()); |
432 if (input_is_observable) { | 358 if (input_is_observable) { |
433 // If input is observable by the debugger, mark all other temporary | 359 // If input is observable by the debugger, mark all other temporary |
434 // registers as unmaterialized so that this register is used in preference. | 360 // registers as unmaterialized so that this register is used in preference. |
435 input_info->MarkTemporariesAsUnmaterialized(temporary_base_); | 361 input_info->MarkTemporariesAsUnmaterialized(temporary_base_); |
436 } | 362 } |
437 } | 363 } |
438 | 364 |
439 void BytecodeRegisterOptimizer::EmitNopForSourceInfo( | 365 void BytecodeRegisterOptimizer::EmitNopForSourceInfo( |
440 BytecodeSourceInfo* source_info) const { | 366 BytecodeSourceInfo source_info) const { |
441 DCHECK(source_info->is_valid()); | 367 DCHECK(source_info.is_valid()); |
442 BytecodeNode nop(Bytecode::kNop, source_info); | 368 BytecodeNode nop(Bytecode::kNop, source_info); |
443 next_stage_->Write(&nop); | 369 next_stage_->Write(&nop); |
444 } | 370 } |
445 | 371 |
446 void BytecodeRegisterOptimizer::DoLdar(BytecodeNode* node) { | 372 void BytecodeRegisterOptimizer::PrepareOutputRegister(Register reg) { |
447 Register input = GetRegisterInputOperand( | 373 RegisterInfo* reg_info = GetRegisterInfo(reg); |
448 0, node->bytecode(), node->operands(), node->operand_count()); | |
449 RegisterInfo* input_info = GetRegisterInfo(input); | |
450 RegisterTransfer(input_info, accumulator_info_, node->source_info_ptr()); | |
451 } | |
452 | |
453 void BytecodeRegisterOptimizer::DoMov(BytecodeNode* node) { | |
454 Register input = GetRegisterInputOperand( | |
455 0, node->bytecode(), node->operands(), node->operand_count()); | |
456 RegisterInfo* input_info = GetRegisterInfo(input); | |
457 Register output = GetRegisterOutputOperand( | |
458 1, node->bytecode(), node->operands(), node->operand_count()); | |
459 RegisterInfo* output_info = GetRegisterInfo(output); | |
460 RegisterTransfer(input_info, output_info, node->source_info_ptr()); | |
461 } | |
462 | |
463 void BytecodeRegisterOptimizer::DoStar(BytecodeNode* node) { | |
464 Register output = GetRegisterOutputOperand( | |
465 0, node->bytecode(), node->operands(), node->operand_count()); | |
466 RegisterInfo* output_info = GetRegisterInfo(output); | |
467 RegisterTransfer(accumulator_info_, output_info, node->source_info_ptr()); | |
468 } | |
469 | |
470 void BytecodeRegisterOptimizer::PrepareRegisterOutputOperand( | |
471 RegisterInfo* reg_info) { | |
472 if (reg_info->materialized()) { | 374 if (reg_info->materialized()) { |
473 CreateMaterializedEquivalent(reg_info); | 375 CreateMaterializedEquivalent(reg_info); |
474 } | 376 } |
| 377 reg_info->MoveToNewEquivalenceSet(NextEquivalenceId(), true); |
475 max_register_index_ = | 378 max_register_index_ = |
476 std::max(max_register_index_, reg_info->register_value().index()); | 379 std::max(max_register_index_, reg_info->register_value().index()); |
477 reg_info->MoveToNewEquivalenceSet(NextEquivalenceId(), true); | |
478 } | 380 } |
479 | 381 |
480 void BytecodeRegisterOptimizer::PrepareRegisterRangeOutputOperand( | 382 void BytecodeRegisterOptimizer::PrepareOutputRegisterList( |
481 Register start, int count) { | 383 RegisterList reg_list) { |
482 for (int i = 0; i < count; ++i) { | 384 int start_index = reg_list.first_register().index(); |
483 Register reg(start.index() + i); | 385 for (int i = 0; i < reg_list.register_count(); ++i) { |
484 RegisterInfo* reg_info = GetRegisterInfo(reg); | 386 Register current(start_index + i); |
485 PrepareRegisterOutputOperand(reg_info); | 387 PrepareOutputRegister(current); |
486 } | 388 } |
487 } | 389 } |
488 | 390 |
489 Register BytecodeRegisterOptimizer::GetEquivalentRegisterForInputOperand( | 391 Register BytecodeRegisterOptimizer::GetInputRegister(Register reg) { |
490 Register reg) { | |
491 // For a temporary register, RegInfo state may need to be created. For |
492 // locals and parameters, the RegInfo state is created in the | |
493 // BytecodeRegisterOptimizer constructor. | |
494 RegisterInfo* reg_info = GetRegisterInfo(reg); | 392 RegisterInfo* reg_info = GetRegisterInfo(reg); |
495 if (reg_info->materialized()) { | 393 if (reg_info->materialized()) { |
496 return reg; | 394 return reg; |
497 } else { | 395 } else { |
498 RegisterInfo* equivalent_info = | 396 RegisterInfo* equivalent_info = |
499 GetMaterializedEquivalentNotAccumulator(reg_info); | 397 GetMaterializedEquivalentNotAccumulator(reg_info); |
500 return equivalent_info->register_value(); | 398 return equivalent_info->register_value(); |
501 } | 399 } |
502 } | 400 } |
503 | 401 |
504 void BytecodeRegisterOptimizer::PrepareRegisterInputOperand( | 402 RegisterList BytecodeRegisterOptimizer::GetInputRegisterList( |
505 BytecodeNode* const node, Register reg, int operand_index) { | 403 RegisterList reg_list) { |
506 Register equivalent = GetEquivalentRegisterForInputOperand(reg); | 404 if (reg_list.register_count() == 1) { |
507 node->UpdateOperand(operand_index, | 405 // If there is only a single register, treat it as a normal input register. |
508 static_cast<uint32_t>(equivalent.ToOperand())); | 406 Register reg(GetInputRegister(reg_list.first_register())); |
509 } | 407 return RegisterList(reg.index(), 1); |
510 | 408 } else { |
511 void BytecodeRegisterOptimizer::PrepareRegisterRangeInputOperand(Register start, | 409 int start_index = reg_list.first_register().index(); |
512 int count) { | 410 for (int i = 0; i < reg_list.register_count(); ++i) { |
513 for (int i = 0; i < count; ++i) { | 411 Register current(start_index + i); |
514 Register current(start.index() + i); | 412 RegisterInfo* input_info = GetRegisterInfo(current); |
515 RegisterInfo* input_info = GetRegisterInfo(current); | 413 Materialize(input_info); |
516 Materialize(input_info); | 414 } |
| 415 return reg_list; |
517 } | 416 } |
518 } | 417 } |
519 | 418 |
520 void BytecodeRegisterOptimizer::PrepareRegisterOperands( | 419 void BytecodeRegisterOptimizer::PrepareForBytecode(Bytecode bytecode) { |
521 BytecodeNode* const node) { | 420 if (Bytecodes::IsJump(bytecode) || bytecode == Bytecode::kDebugger || |
522 // | 421 bytecode == Bytecode::kSuspendGenerator) { |
523 // For each input operand, get a materialized equivalent if it is | 422 // All state must be flushed before emitting |
524 // just a single register, otherwise materialize register range. | 423 // - a jump bytecode (as the register equivalents at the jump target aren't |
525 // Update operand_scale if necessary. | 424 // known. |
526 // | 425 // - a call to the debugger (as it can manipulate locals and parameters), |
527 // For each output register about to be clobbered, materialize an | 426 // - a generator suspend (as this involves saving all registers). |
528 // equivalent if it exists. Put each register in its own equivalence set. | 426 // - a generator suspend (as this involves saving all registers). |
529 // | 428 } |
530 const uint32_t* operands = node->operands(); | |
531 int operand_count = node->operand_count(); | |
532 const OperandType* operand_types = | |
533 Bytecodes::GetOperandTypes(node->bytecode()); | |
534 for (int i = 0; i < operand_count; ++i) { | |
535 int count; | |
536 if (operand_types[i] == OperandType::kRegList) { | |
537 DCHECK_LT(i, operand_count - 1); | |
538 DCHECK(operand_types[i + 1] == OperandType::kRegCount); | |
539 count = static_cast<int>(operands[i + 1]); | |
540 } else { | |
541 count = Bytecodes::GetNumberOfRegistersRepresentedBy(operand_types[i]); | |
542 } | |
543 | 429 |
544 if (count == 0) { | |
545 continue; | |
546 } | |
547 | |
548 Register reg = Register::FromOperand(static_cast<int32_t>(operands[i])); | |
549 if (Bytecodes::IsRegisterInputOperandType(operand_types[i])) { | |
550 if (count == 1) { | |
551 PrepareRegisterInputOperand(node, reg, i); | |
552 } else if (count > 1) { | |
553 PrepareRegisterRangeInputOperand(reg, count); | |
554 } | |
555 } else if (Bytecodes::IsRegisterOutputOperandType(operand_types[i])) { | |
556 PrepareRegisterRangeOutputOperand(reg, count); | |
557 } | |
558 } | |
559 } | |
560 | |
561 void BytecodeRegisterOptimizer::PrepareAccumulator(BytecodeNode* const node) { | |
562 // Materialize the accumulator if it is read by the bytecode. The | 430 // Materialize the accumulator if it is read by the bytecode. The |
563 // accumulator is special and no other register can be materialized | 431 // accumulator is special and no other register can be materialized |
564 // in its place. | 432 // in its place. |
565 if (Bytecodes::ReadsAccumulator(node->bytecode()) && | 433 if (Bytecodes::ReadsAccumulator(bytecode) && |
566 !accumulator_info_->materialized()) { | 434 !accumulator_info_->materialized()) { |
567 Materialize(accumulator_info_); | 435 Materialize(accumulator_info_); |
568 } | 436 } |
569 | 437 |
570 // Materialize an equivalent to the accumulator if it will be | 438 // Materialize an equivalent to the accumulator if it will be |
571 // clobbered when the bytecode is dispatched. | 439 // clobbered when the bytecode is dispatched. |
572 if (Bytecodes::WritesAccumulator(node->bytecode())) { | 440 if (Bytecodes::WritesAccumulator(bytecode)) { |
573 PrepareRegisterOutputOperand(accumulator_info_); | 441 PrepareOutputRegister(accumulator_); |
574 } | 442 } |
575 } | 443 } |
576 | 444 |
577 void BytecodeRegisterOptimizer::PrepareOperands(BytecodeNode* const node) { | |
578 PrepareAccumulator(node); | |
579 PrepareRegisterOperands(node); | |
580 } | |
581 | |
582 // static | |
583 Register BytecodeRegisterOptimizer::GetRegisterInputOperand( | |
584 int index, Bytecode bytecode, const uint32_t* operands, int operand_count) { | |
585 DCHECK_LT(index, operand_count); | |
586 DCHECK(Bytecodes::IsRegisterInputOperandType( | |
587 Bytecodes::GetOperandType(bytecode, index))); | |
588 return OperandToRegister(operands[index]); | |
589 } | |
590 | |
591 // static | |
592 Register BytecodeRegisterOptimizer::GetRegisterOutputOperand( | |
593 int index, Bytecode bytecode, const uint32_t* operands, int operand_count) { | |
594 DCHECK_LT(index, operand_count); | |
595 DCHECK(Bytecodes::IsRegisterOutputOperandType( | |
596 Bytecodes::GetOperandType(bytecode, index))); | |
597 return OperandToRegister(operands[index]); | |
598 } | |
599 | |
600 BytecodeRegisterOptimizer::RegisterInfo* | |
601 BytecodeRegisterOptimizer::GetRegisterInfo(Register reg) { | |
602 size_t index = GetRegisterInfoTableIndex(reg); | |
603 DCHECK_LT(index, register_info_table_.size()); | |
604 return register_info_table_[index]; | |
605 } | |
606 | |
607 BytecodeRegisterOptimizer::RegisterInfo* | |
608 BytecodeRegisterOptimizer::GetOrCreateRegisterInfo(Register reg) { | |
609 size_t index = GetRegisterInfoTableIndex(reg); | |
610 return index < register_info_table_.size() ? register_info_table_[index] | |
611 : NewRegisterInfo(reg); | |
612 } | |
613 | |
614 BytecodeRegisterOptimizer::RegisterInfo* | |
615 BytecodeRegisterOptimizer::NewRegisterInfo(Register reg) { | |
616 size_t index = GetRegisterInfoTableIndex(reg); | |
617 DCHECK_GE(index, register_info_table_.size()); | |
618 GrowRegisterMap(reg); | |
619 return register_info_table_[index]; | |
620 } | |
621 | |
622 void BytecodeRegisterOptimizer::GrowRegisterMap(Register reg) { | 445 void BytecodeRegisterOptimizer::GrowRegisterMap(Register reg) { |
623 DCHECK(RegisterIsTemporary(reg)); | 446 DCHECK(RegisterIsTemporary(reg)); |
624 size_t index = GetRegisterInfoTableIndex(reg); | 447 size_t index = GetRegisterInfoTableIndex(reg); |
625 if (index >= register_info_table_.size()) { | 448 if (index >= register_info_table_.size()) { |
626 size_t new_size = index + 1; | 449 size_t new_size = index + 1; |
627 size_t old_size = register_info_table_.size(); | 450 size_t old_size = register_info_table_.size(); |
628 register_info_table_.resize(new_size); | 451 register_info_table_.resize(new_size); |
629 for (size_t i = old_size; i < new_size; ++i) { | 452 for (size_t i = old_size; i < new_size; ++i) { |
630 register_info_table_[i] = | 453 register_info_table_[i] = |
631 new (zone()) RegisterInfo(RegisterFromRegisterInfoTableIndex(i), | 454 new (zone()) RegisterInfo(RegisterFromRegisterInfoTableIndex(i), |
(...skipping 20 matching lines...)
652 void BytecodeRegisterOptimizer::RegisterListFreeEvent(RegisterList reg_list) { | 475 void BytecodeRegisterOptimizer::RegisterListFreeEvent(RegisterList reg_list) { |
653 int first_index = reg_list.first_register().index(); | 476 int first_index = reg_list.first_register().index(); |
654 for (int i = 0; i < reg_list.register_count(); i++) { | 477 for (int i = 0; i < reg_list.register_count(); i++) { |
655 GetRegisterInfo(Register(first_index + i))->set_allocated(false); | 478 GetRegisterInfo(Register(first_index + i))->set_allocated(false); |
656 } | 479 } |
657 } | 480 } |
658 | 481 |
659 } // namespace interpreter | 482 } // namespace interpreter |
660 } // namespace internal | 483 } // namespace internal |
661 } // namespace v8 | 484 } // namespace v8 |
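
The machinery in Flush() and RegisterTransfer() above boils down to this: register-to-register moves are recorded as equivalences instead of being emitted, reads are redirected to a materialized equivalent, and pending moves are only emitted when state is flushed (before a jump, a Debugger bytecode, or a generator suspend). Below is a minimal, self-contained C++ sketch of that idea using invented names (ToyOptimizer, Resolve); it is an illustration of the technique, not V8's implementation.

// Simplified model only: a register's value may live in an "equivalent"
// register; Mov is deferred and reads are redirected until Flush().
#include <cstdio>
#include <map>
#include <utility>
#include <vector>

struct ToyOptimizer {
  std::map<int, int> equivalent_of;  // dst register -> register holding its value

  // Reads use the register that actually holds the value.
  int Resolve(int reg) {
    auto it = equivalent_of.find(reg);
    return it == equivalent_of.end() ? reg : it->second;
  }

  // Record a Mov src -> dst without emitting it.
  void Mov(int src, int dst) { equivalent_of[dst] = Resolve(src); }

  // Emit the deferred moves and break all equivalences, as before a jump,
  // a Debugger bytecode, or a generator suspend.
  void Flush(std::vector<std::pair<int, int>>* emitted) {
    for (const auto& entry : equivalent_of) {
      if (entry.first != entry.second) {
        emitted->push_back({entry.second, entry.first});  // Mov src, dst
      }
    }
    equivalent_of.clear();
  }
};

int main() {
  ToyOptimizer opt;
  opt.Mov(/*src=*/0, /*dst=*/1);                        // recorded, nothing emitted yet
  std::printf("r1 reads from r%d\n", opt.Resolve(1));   // reads of r1 use r0
  std::vector<std::pair<int, int>> emitted;
  opt.Flush(&emitted);                                  // the pending move is emitted here
  std::printf("moves emitted: %zu\n", emitted.size());  // 1
  return 0;
}

The real optimizer additionally tracks per-register materialization, treats the accumulator specially, and forces stores to registers that are observable in the debugger, as the diff shows.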