| OLD | NEW |
| 1 // Copyright 2016 the V8 project authors. All rights reserved. | 1 // Copyright 2016 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/interpreter/bytecode-register-optimizer.h" | 5 #include "src/interpreter/bytecode-register-optimizer.h" |
| 6 | 6 |
| 7 namespace v8 { | 7 namespace v8 { |
| 8 namespace internal { | 8 namespace internal { |
| 9 namespace interpreter { | 9 namespace interpreter { |
| 10 | 10 |
| (...skipping 156 matching lines...) |
| 167 BytecodeRegisterOptimizer::RegisterInfo* | 167 BytecodeRegisterOptimizer::RegisterInfo* |
| 168 BytecodeRegisterOptimizer::RegisterInfo::GetEquivalent() { | 168 BytecodeRegisterOptimizer::RegisterInfo::GetEquivalent() { |
| 169 return next_; | 169 return next_; |
| 170 } | 170 } |
| 171 | 171 |
| 172 BytecodeRegisterOptimizer::BytecodeRegisterOptimizer( | 172 BytecodeRegisterOptimizer::BytecodeRegisterOptimizer( |
| 173 Zone* zone, TemporaryRegisterAllocator* register_allocator, | 173 Zone* zone, TemporaryRegisterAllocator* register_allocator, |
| 174 int parameter_count, BytecodePipelineStage* next_stage) | 174 int parameter_count, BytecodePipelineStage* next_stage) |
| 175 : accumulator_(Register::virtual_accumulator()), | 175 : accumulator_(Register::virtual_accumulator()), |
| 176 temporary_base_(register_allocator->allocation_base()), | 176 temporary_base_(register_allocator->allocation_base()), |
| 177 max_register_index_(register_allocator->allocation_base() - 1), | |
| 178 register_info_table_(zone), | 177 register_info_table_(zone), |
| 179 equivalence_id_(0), | 178 equivalence_id_(0), |
| 180 next_stage_(next_stage), | 179 next_stage_(next_stage), |
| 181 flush_required_(false), | 180 flush_required_(false), |
| 182 zone_(zone) { | 181 zone_(zone) { |
| 183 register_allocator->set_observer(this); | 182 register_allocator->set_observer(this); |
| 184 | 183 |
| 185 // Calculate offset so register index values can be mapped into | 184 // Calculate offset so register index values can be mapped into |
| 186 // a vector of register metadata. | 185 // a vector of register metadata. |
| 187 if (parameter_count != 0) { | 186 if (parameter_count != 0) { |
| (...skipping 14 matching lines...) |
| 202 RegisterFromRegisterInfoTableIndex(i), NextEquivalenceId(), true); | 201 RegisterFromRegisterInfoTableIndex(i), NextEquivalenceId(), true); |
| 203 DCHECK_EQ(register_info_table_[i]->register_value().index(), | 202 DCHECK_EQ(register_info_table_[i]->register_value().index(), |
| 204 RegisterFromRegisterInfoTableIndex(i).index()); | 203 RegisterFromRegisterInfoTableIndex(i).index()); |
| 205 } | 204 } |
| 206 accumulator_info_ = GetRegisterInfo(accumulator_); | 205 accumulator_info_ = GetRegisterInfo(accumulator_); |
| 207 DCHECK(accumulator_info_->register_value() == accumulator_); | 206 DCHECK(accumulator_info_->register_value() == accumulator_); |
| 208 } | 207 } |
| 209 | 208 |
| 210 // override | 209 // override |
| 211 Handle<BytecodeArray> BytecodeRegisterOptimizer::ToBytecodeArray( | 210 Handle<BytecodeArray> BytecodeRegisterOptimizer::ToBytecodeArray( |
| 212 Isolate* isolate, int register_count, int parameter_count, | 211 Isolate* isolate, int fixed_register_count, int parameter_count, |
| 213 Handle<FixedArray> handler_table) { | 212 Handle<FixedArray> handler_table) { |
| 214 FlushState(); | 213 FlushState(); |
| 215 return next_stage_->ToBytecodeArray(isolate, max_register_index_ + 1, | 214 return next_stage_->ToBytecodeArray(isolate, fixed_register_count, |
| 216 parameter_count, handler_table); | 215 parameter_count, handler_table); |
| 217 } | 216 } |
| 218 | 217 |
| 219 // override | 218 // override |
| 220 void BytecodeRegisterOptimizer::Write(BytecodeNode* node) { | 219 void BytecodeRegisterOptimizer::Write(BytecodeNode* node) { |
| 221 // Jumps are handled by WriteJump. | |
| 222 DCHECK(!Bytecodes::IsJump(node->bytecode())); | |
| 223 // | 220 // |
| 224 // Transfers with observable registers as the destination will be | 221 // Transfers with observable registers as the destination will be |
| 225 // immediately materialized so the source position information will | 222 // immediately materialized so the source position information will |
| 226 // be ordered correctly. | 223 // be ordered correctly. |
| 227 // | 224 // |
| 228 // Transfers without observable destination registers will initially | 225 // Transfers without observable destination registers will initially |
| 229 // be emitted as Nop's with the source position. They may, or may | 226 // be emitted as Nop's with the source position. They may, or may |
| 230 // not, be materialized by the optimizer. However, the source | 227 // not, be materialized by the optimizer. However, the source |
| 231 // position is not lost and being attached to a Nop is fine as the | 228 // position is not lost and being attached to a Nop is fine as the |
| 232 // destination register is not observable in the debugger. | 229 // destination register is not observable in the debugger. |
| 233 // | 230 // |
| 234 switch (node->bytecode()) { | 231 switch (node->bytecode()) { |
| 235 case Bytecode::kLdar: { | 232 case Bytecode::kLdar: { |
| 236 DoLdar(node); | 233 DoLdar(node); |
| 237 return; | 234 return; |
| 238 } | 235 } |
| 239 case Bytecode::kStar: { | 236 case Bytecode::kStar: { |
| 240 DoStar(node); | 237 DoStar(node); |
| 241 return; | 238 return; |
| 242 } | 239 } |
| 243 case Bytecode::kMov: { | 240 case Bytecode::kMov: { |
| 244 DoMov(node); | 241 DoMov(node); |
| 245 return; | 242 return; |
| 246 } | 243 } |
| 247 default: | 244 default: |
| 248 break; | 245 break; |
| 249 } | 246 } |
| 250 | 247 |
| 251 if (node->bytecode() == Bytecode::kDebugger || | 248 if (Bytecodes::IsJump(node->bytecode()) || |
| 249 node->bytecode() == Bytecode::kDebugger || |
| 252 node->bytecode() == Bytecode::kSuspendGenerator) { | 250 node->bytecode() == Bytecode::kSuspendGenerator) { |
| 253 // All state must be flushed before emitting | 251 // All state must be flushed before emitting |
| 252 // - a jump (due to how bytecode offsets for jumps are evaluated), |
| 254 // - a call to the debugger (as it can manipulate locals and parameters), | 253 // - a call to the debugger (as it can manipulate locals and parameters), |
| 255 // - a generator suspend (as this involves saving all registers). | 254 // - a generator suspend (as this involves saving all registers). |
| 256 FlushState(); | 255 FlushState(); |
| 257 } | 256 } |
| 258 | 257 |
| 259 PrepareOperands(node); | 258 PrepareOperands(node); |
| 260 next_stage_->Write(node); | 259 WriteToNextStage(node); |
| 261 } | 260 } |
| 262 | 261 |
| 263 // override | 262 // override |
| 264 void BytecodeRegisterOptimizer::WriteJump(BytecodeNode* node, | 263 void BytecodeRegisterOptimizer::WriteJump(BytecodeNode* node, |
| 265 BytecodeLabel* label) { | 264 BytecodeLabel* label) { |
| 266 FlushState(); | 265 FlushState(); |
| 267 next_stage_->WriteJump(node, label); | 266 next_stage_->WriteJump(node, label); |
| 268 } | 267 } |
| 269 | 268 |
| 270 // override | 269 // override |
| (...skipping 29 matching lines...) |
| 300 OutputRegisterTransfer(reg_info, equivalent); | 299 OutputRegisterTransfer(reg_info, equivalent); |
| 301 } | 300 } |
| 302 equivalent->MoveToNewEquivalenceSet(NextEquivalenceId(), true); | 301 equivalent->MoveToNewEquivalenceSet(NextEquivalenceId(), true); |
| 303 } | 302 } |
| 304 } | 303 } |
| 305 } | 304 } |
| 306 | 305 |
| 307 flush_required_ = false; | 306 flush_required_ = false; |
| 308 } | 307 } |
| 309 | 308 |
| 309 void BytecodeRegisterOptimizer::WriteToNextStage(BytecodeNode* node) const { |
| 310 next_stage_->Write(node); |
| 311 } |
| 312 |
| 313 void BytecodeRegisterOptimizer::WriteToNextStage( |
| 314 BytecodeNode* node, const BytecodeSourceInfo& source_info) const { |
| 315 if (source_info.is_valid()) { |
| 316 node->source_info().Clone(source_info); |
| 317 } |
| 318 next_stage_->Write(node); |
| 319 } |
| 320 |
| 310 void BytecodeRegisterOptimizer::OutputRegisterTransfer( | 321 void BytecodeRegisterOptimizer::OutputRegisterTransfer( |
| 311 RegisterInfo* input_info, RegisterInfo* output_info, | 322 RegisterInfo* input_info, RegisterInfo* output_info, |
| 312 BytecodeSourceInfo* source_info) { | 323 const BytecodeSourceInfo& source_info) { |
| 313 Register input = input_info->register_value(); | 324 Register input = input_info->register_value(); |
| 314 Register output = output_info->register_value(); | 325 Register output = output_info->register_value(); |
| 315 DCHECK_NE(input.index(), output.index()); | 326 DCHECK_NE(input.index(), output.index()); |
| 316 | 327 |
| 317 if (input == accumulator_) { | 328 if (input == accumulator_) { |
| 318 uint32_t operand = static_cast<uint32_t>(output.ToOperand()); | 329 uint32_t operand = static_cast<uint32_t>(output.ToOperand()); |
| 319 BytecodeNode node(Bytecode::kStar, operand, source_info); | 330 BytecodeNode node(Bytecode::kStar, operand); |
| 320 next_stage_->Write(&node); | 331 WriteToNextStage(&node, source_info); |
| 321 } else if (output == accumulator_) { | 332 } else if (output == accumulator_) { |
| 322 uint32_t operand = static_cast<uint32_t>(input.ToOperand()); | 333 uint32_t operand = static_cast<uint32_t>(input.ToOperand()); |
| 323 BytecodeNode node(Bytecode::kLdar, operand, source_info); | 334 BytecodeNode node(Bytecode::kLdar, operand); |
| 324 next_stage_->Write(&node); | 335 WriteToNextStage(&node, source_info); |
| 325 } else { | 336 } else { |
| 326 uint32_t operand0 = static_cast<uint32_t>(input.ToOperand()); | 337 uint32_t operand0 = static_cast<uint32_t>(input.ToOperand()); |
| 327 uint32_t operand1 = static_cast<uint32_t>(output.ToOperand()); | 338 uint32_t operand1 = static_cast<uint32_t>(output.ToOperand()); |
| 328 BytecodeNode node(Bytecode::kMov, operand0, operand1, source_info); | 339 BytecodeNode node(Bytecode::kMov, operand0, operand1); |
| 329 next_stage_->Write(&node); | 340 WriteToNextStage(&node, source_info); |
| 330 } | |
| 331 if (output != accumulator_) { | |
| 332 max_register_index_ = std::max(max_register_index_, output.index()); | |
| 333 } | 341 } |
| 334 output_info->set_materialized(true); | 342 output_info->set_materialized(true); |
| 335 } | 343 } |
| 336 | 344 |
| 337 void BytecodeRegisterOptimizer::CreateMaterializedEquivalent( | 345 void BytecodeRegisterOptimizer::CreateMaterializedEquivalent( |
| 338 RegisterInfo* info) { | 346 RegisterInfo* info) { |
| 339 DCHECK(info->materialized()); | 347 DCHECK(info->materialized()); |
| 340 RegisterInfo* unmaterialized = info->GetEquivalentToMaterialize(); | 348 RegisterInfo* unmaterialized = info->GetEquivalentToMaterialize(); |
| 341 if (unmaterialized) { | 349 if (unmaterialized) { |
| 342 OutputRegisterTransfer(info, unmaterialized); | 350 OutputRegisterTransfer(info, unmaterialized); |
| (...skipping 31 matching lines...) |
| 374 void BytecodeRegisterOptimizer::AddToEquivalenceSet( | 382 void BytecodeRegisterOptimizer::AddToEquivalenceSet( |
| 375 RegisterInfo* set_member, RegisterInfo* non_set_member) { | 383 RegisterInfo* set_member, RegisterInfo* non_set_member) { |
| 376 non_set_member->AddToEquivalenceSetOf(set_member); | 384 non_set_member->AddToEquivalenceSetOf(set_member); |
| 377 // Flushing is only required when two or more registers are placed | 385 // Flushing is only required when two or more registers are placed |
| 378 // in the same equivalence set. | 386 // in the same equivalence set. |
| 379 flush_required_ = true; | 387 flush_required_ = true; |
| 380 } | 388 } |
| 381 | 389 |
| 382 void BytecodeRegisterOptimizer::RegisterTransfer( | 390 void BytecodeRegisterOptimizer::RegisterTransfer( |
| 383 RegisterInfo* input_info, RegisterInfo* output_info, | 391 RegisterInfo* input_info, RegisterInfo* output_info, |
| 384 BytecodeSourceInfo* source_info) { | 392 const BytecodeSourceInfo& source_info) { |
| 385 // Materialize an alternate in the equivalence set that | 393 // Materialize an alternate in the equivalence set that |
| 386 // |output_info| is leaving. | 394 // |output_info| is leaving. |
| 387 if (output_info->materialized()) { | 395 if (output_info->materialized()) { |
| 388 CreateMaterializedEquivalent(output_info); | 396 CreateMaterializedEquivalent(output_info); |
| 389 } | 397 } |
| 390 | 398 |
| 391 // Add |output_info| to new equivalence set. | 399 // Add |output_info| to new equivalence set. |
| 392 if (!output_info->IsInSameEquivalenceSet(input_info)) { | 400 if (!output_info->IsInSameEquivalenceSet(input_info)) { |
| 393 AddToEquivalenceSet(input_info, output_info); | 401 AddToEquivalenceSet(input_info, output_info); |
| 394 } | 402 } |
| 395 | 403 |
| 396 bool output_is_observable = | 404 bool output_is_observable = |
| 397 RegisterIsObservable(output_info->register_value()); | 405 RegisterIsObservable(output_info->register_value()); |
| 398 if (output_is_observable) { | 406 if (output_is_observable) { |
| 399 // Force store to be emitted when register is observable. | 407 // Force store to be emitted when register is observable. |
| 400 output_info->set_materialized(false); | 408 output_info->set_materialized(false); |
| 401 RegisterInfo* materialized_info = input_info->GetMaterializedEquivalent(); | 409 RegisterInfo* materialized_info = input_info->GetMaterializedEquivalent(); |
| 402 OutputRegisterTransfer(materialized_info, output_info, source_info); | 410 OutputRegisterTransfer(materialized_info, output_info, source_info); |
| 403 } else if (source_info->is_valid()) { | 411 } else if (source_info.is_valid()) { |
| 404 // Emit a placeholder nop to maintain source position info. | 412 // Emit a placeholder nop to maintain source position info. |
| 405 EmitNopForSourceInfo(source_info); | 413 EmitNopForSourceInfo(source_info); |
| 406 } | 414 } |
| 407 } | 415 } |
| 408 | 416 |
| 409 void BytecodeRegisterOptimizer::EmitNopForSourceInfo( | 417 void BytecodeRegisterOptimizer::EmitNopForSourceInfo( |
| 410 BytecodeSourceInfo* source_info) const { | 418 const BytecodeSourceInfo& source_info) const { |
| 411 DCHECK(source_info->is_valid()); | 419 DCHECK(source_info.is_valid()); |
| 412 BytecodeNode nop(Bytecode::kNop, source_info); | 420 BytecodeNode nop(Bytecode::kNop); |
| 413 next_stage_->Write(&nop); | 421 nop.source_info().Clone(source_info); |
| 422 WriteToNextStage(&nop); |
| 414 } | 423 } |
| 415 | 424 |
| 416 void BytecodeRegisterOptimizer::DoLdar(BytecodeNode* node) { | 425 void BytecodeRegisterOptimizer::DoLdar(const BytecodeNode* const node) { |
| 417 Register input = GetRegisterInputOperand( | 426 Register input = GetRegisterInputOperand( |
| 418 0, node->bytecode(), node->operands(), node->operand_count()); | 427 0, node->bytecode(), node->operands(), node->operand_count()); |
| 419 RegisterInfo* input_info = GetRegisterInfo(input); | 428 RegisterInfo* input_info = GetRegisterInfo(input); |
| 420 RegisterTransfer(input_info, accumulator_info_, node->source_info_ptr()); | 429 RegisterTransfer(input_info, accumulator_info_, node->source_info()); |
| 421 } | 430 } |
| 422 | 431 |
| 423 void BytecodeRegisterOptimizer::DoMov(BytecodeNode* node) { | 432 void BytecodeRegisterOptimizer::DoMov(const BytecodeNode* const node) { |
| 424 Register input = GetRegisterInputOperand( | 433 Register input = GetRegisterInputOperand( |
| 425 0, node->bytecode(), node->operands(), node->operand_count()); | 434 0, node->bytecode(), node->operands(), node->operand_count()); |
| 426 RegisterInfo* input_info = GetRegisterInfo(input); | 435 RegisterInfo* input_info = GetRegisterInfo(input); |
| 427 Register output = GetRegisterOutputOperand( | 436 Register output = GetRegisterOutputOperand( |
| 428 1, node->bytecode(), node->operands(), node->operand_count()); | 437 1, node->bytecode(), node->operands(), node->operand_count()); |
| 429 RegisterInfo* output_info = GetOrCreateRegisterInfo(output); | 438 RegisterInfo* output_info = GetOrCreateRegisterInfo(output); |
| 430 RegisterTransfer(input_info, output_info, node->source_info_ptr()); | 439 RegisterTransfer(input_info, output_info, node->source_info()); |
| 431 } | 440 } |
| 432 | 441 |
| 433 void BytecodeRegisterOptimizer::DoStar(BytecodeNode* node) { | 442 void BytecodeRegisterOptimizer::DoStar(const BytecodeNode* const node) { |
| 434 Register output = GetRegisterOutputOperand( | 443 Register output = GetRegisterOutputOperand( |
| 435 0, node->bytecode(), node->operands(), node->operand_count()); | 444 0, node->bytecode(), node->operands(), node->operand_count()); |
| 436 RegisterInfo* output_info = GetOrCreateRegisterInfo(output); | 445 RegisterInfo* output_info = GetOrCreateRegisterInfo(output); |
| 437 RegisterTransfer(accumulator_info_, output_info, node->source_info_ptr()); | 446 RegisterTransfer(accumulator_info_, output_info, node->source_info()); |
| 438 } | 447 } |
| 439 | 448 |
| 440 void BytecodeRegisterOptimizer::PrepareRegisterOutputOperand( | 449 void BytecodeRegisterOptimizer::PrepareRegisterOutputOperand( |
| 441 RegisterInfo* reg_info) { | 450 RegisterInfo* reg_info) { |
| 442 if (reg_info->materialized()) { | 451 if (reg_info->materialized()) { |
| 443 CreateMaterializedEquivalent(reg_info); | 452 CreateMaterializedEquivalent(reg_info); |
| 444 } | 453 } |
| 445 max_register_index_ = | |
| 446 std::max(max_register_index_, reg_info->register_value().index()); | |
| 447 reg_info->MoveToNewEquivalenceSet(NextEquivalenceId(), true); | 454 reg_info->MoveToNewEquivalenceSet(NextEquivalenceId(), true); |
| 448 } | 455 } |
| 449 | 456 |
| 450 void BytecodeRegisterOptimizer::PrepareRegisterRangeOutputOperand( | 457 void BytecodeRegisterOptimizer::PrepareRegisterRangeOutputOperand( |
| 451 Register start, int count) { | 458 Register start, int count) { |
| 452 for (int i = 0; i < count; ++i) { | 459 for (int i = 0; i < count; ++i) { |
| 453 Register reg(start.index() + i); | 460 Register reg(start.index() + i); |
| 454 RegisterInfo* reg_info = GetOrCreateRegisterInfo(reg); | 461 RegisterInfo* reg_info = GetOrCreateRegisterInfo(reg); |
| 455 PrepareRegisterOutputOperand(reg_info); | 462 PrepareRegisterOutputOperand(reg_info); |
| 456 } | 463 } |
| (...skipping 10 matching lines...) |
| 467 } else { | 474 } else { |
| 468 RegisterInfo* equivalent_info = | 475 RegisterInfo* equivalent_info = |
| 469 GetMaterializedEquivalentNotAccumulator(reg_info); | 476 GetMaterializedEquivalentNotAccumulator(reg_info); |
| 470 return equivalent_info->register_value(); | 477 return equivalent_info->register_value(); |
| 471 } | 478 } |
| 472 } | 479 } |
| 473 | 480 |
| 474 void BytecodeRegisterOptimizer::PrepareRegisterInputOperand( | 481 void BytecodeRegisterOptimizer::PrepareRegisterInputOperand( |
| 475 BytecodeNode* const node, Register reg, int operand_index) { | 482 BytecodeNode* const node, Register reg, int operand_index) { |
| 476 Register equivalent = GetEquivalentRegisterForInputOperand(reg); | 483 Register equivalent = GetEquivalentRegisterForInputOperand(reg); |
| 477 node->UpdateOperand(operand_index, | 484 node->operands()[operand_index] = |
| 478 static_cast<uint32_t>(equivalent.ToOperand())); | 485 static_cast<uint32_t>(equivalent.ToOperand()); |
| 479 } | 486 } |
| 480 | 487 |
| 481 void BytecodeRegisterOptimizer::PrepareRegisterRangeInputOperand(Register start, | 488 void BytecodeRegisterOptimizer::PrepareRegisterRangeInputOperand(Register start, |
| 482 int count) { | 489 int count) { |
| 483 for (int i = 0; i < count; ++i) { | 490 for (int i = 0; i < count; ++i) { |
| 484 Register current(start.index() + i); | 491 Register current(start.index() + i); |
| 485 RegisterInfo* input_info = GetRegisterInfo(current); | 492 RegisterInfo* input_info = GetRegisterInfo(current); |
| 486 Materialize(input_info); | 493 Materialize(input_info); |
| 487 } | 494 } |
| 488 } | 495 } |
| (...skipping 122 matching lines...) |
| 611 if (info->materialized()) { | 618 if (info->materialized()) { |
| 612 CreateMaterializedEquivalent(info); | 619 CreateMaterializedEquivalent(info); |
| 613 } | 620 } |
| 614 info->MoveToNewEquivalenceSet(kInvalidEquivalenceId, false); | 621 info->MoveToNewEquivalenceSet(kInvalidEquivalenceId, false); |
| 615 } | 622 } |
| 616 } | 623 } |
| 617 | 624 |
| 618 } // namespace interpreter | 625 } // namespace interpreter |
| 619 } // namespace internal | 626 } // namespace internal |
| 620 } // namespace v8 | 627 } // namespace v8 |
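The mechanism this patch reworks is the optimizer's equivalence-set bookkeeping: registers (and the virtual accumulator) known to hold the same value share a set, so `Ldar`/`Star`/`Mov` transfers within a set can be deferred or dropped, while the whole state is flushed before a jump, a `Debugger` bytecode, or a generator suspend. Below is a minimal, self-contained sketch of that idea only; `ToyOptimizer` and `Transfer` are made-up names, it ignores the observable-register and source-position handling seen in the diff, and it is not V8's actual API.

```cpp
// Toy model of register equivalence sets: a transfer between two registers
// that are already in the same set needs no bytecode, and FlushState() breaks
// every set apart before control flow the optimizer cannot see across.
#include <cstdio>
#include <unordered_map>

class ToyOptimizer {
 public:
  // Record that |dst| now holds the same value as |src|. Returns true if a
  // real Mov/Star/Ldar would have to be emitted (the copy is not redundant).
  bool Transfer(int src, int dst) {
    int src_set = SetOf(src);
    if (SetOf(dst) == src_set) return false;  // already equivalent: elide
    set_of_[dst] = src_set;
    return true;
  }

  // Before a jump, a debugger call, or a generator suspend, every register
  // goes back into a set of its own, mirroring FlushState() in the patch.
  void FlushState() {
    for (auto& entry : set_of_) entry.second = next_id_++;
  }

 private:
  int SetOf(int reg) {
    auto it = set_of_.find(reg);
    if (it != set_of_.end()) return it->second;
    set_of_[reg] = next_id_;
    return next_id_++;
  }

  std::unordered_map<int, int> set_of_;  // register index -> equivalence set id
  int next_id_ = 0;
};

int main() {
  ToyOptimizer opt;
  std::printf("r0 -> r1 needs a move: %d\n", opt.Transfer(0, 1));  // 1
  std::printf("r1 -> r0 needs a move: %d\n", opt.Transfer(1, 0));  // 0 (elided)
  opt.FlushState();  // e.g. before a jump
  std::printf("r1 -> r0 after flush:  %d\n", opt.Transfer(1, 0));  // 1 again
  return 0;
}
```

The sketch's `FlushState()` corresponds to why this patch routes jumps through `Write()` with a flush (and why `WriteJump()` flushes as well): once control can arrive from or leave to a point the optimizer cannot see, no register equivalence can be assumed to survive.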