Chromium Code Reviews| OLD | NEW |
|---|---|
| (Empty) | |
| 1 // Copyright 2016 the V8 project authors. All rights reserved. | |
| 2 // Use of this source code is governed by a BSD-style license that can be | |
| 3 // found in the LICENSE file. | |
| 4 | |
| 5 #include "src/interpreter/bytecode-register-optimizer.h" | |
| 6 | |
| 7 namespace v8 { | |
| 8 namespace internal { | |
| 9 namespace interpreter { | |
| 10 | |
// A class for tracking the state of a register. This class tracks
// which equivalence set a register is a member of and also whether a
// register is materialized in the bytecode stream.
class BytecodeRegisterOptimizer::RegisterInfo final : public ZoneObject {
 public:
  RegisterInfo(Register reg, uint32_t equivalence_id, bool materialized)
      : register_(reg),
        equivalence_id_(equivalence_id),
        materialized_(materialized),
        next_(this),
        prev_(this) {}

  // Adds this register to the equivalence set containing |info|. This
  // register must currently be the only member of its own set and must
  // not already be equivalent to |info|.
  void AddToEquivalenceSetOf(RegisterInfo* info);
  // Removes this register from its current equivalence set and places
  // it alone in a new set identified by |equivalence_id|.
  void MoveToNewEquivalenceSet(uint32_t equivalence_id, bool materialized);
  bool IsOnlyMemberOfEquivalenceSet() const;
  bool IsOnlyMaterializedMemberOfEquivalenceSet() const;
  // True when |info| carries the same equivalence id as this register.
  bool IsEquivalent(RegisterInfo* info) const;

  // Get a member of this register's equivalence set that is
  // materialized. The materialized equivalent will be this register
  // if it is materialized. Returns nullptr if no materialized
  // equivalent exists.
  RegisterInfo* GetMaterializedEquivalent();

  // Get a member of this register's equivalence set that is
  // materialized and not register |reg|. The materialized equivalent
  // will be this register if it is materialized. Returns nullptr if
  // no materialized equivalent exists.
  RegisterInfo* GetMaterializedEquivalentOtherThan(Register reg);

  // Get a member of this register's equivalence set that is intended
  // to be materialized in place of this register (which is currently
  // materialized). The best candidate is deemed to be the register
  // with the lowest index as this permits temporary registers to be
  // removed from the bytecode stream. Returns nullptr if no candidate
  // exists.
  RegisterInfo* GetEquivalentToMaterialize();

  Register register_value() const { return register_; }
  bool materialized() const { return materialized_; }
  void set_materialized(bool materialized) { materialized_ = materialized; }
  void set_equivalence_id(uint32_t equivalence_id) {
    equivalence_id_ = equivalence_id;
  }
  uint32_t equivalence_id() const { return equivalence_id_; }

 private:
  // The register this info describes.
  Register register_;
  // Id shared by every member of this register's equivalence set.
  uint32_t equivalence_id_;
  // True when the register's value is present in the bytecode stream.
  bool materialized_;

  // Equivalence set pointers. Members form an intrusive circular
  // doubly-linked list; a singleton set points back at itself.
  RegisterInfo* next_;
  RegisterInfo* prev_;

  DISALLOW_COPY_AND_ASSIGN(RegisterInfo);
};
| 68 | |
// Inserts this register into the circular equivalence list of |info|.
void BytecodeRegisterOptimizer::RegisterInfo::AddToEquivalenceSetOf(
    RegisterInfo* info) {
  DCHECK(!IsEquivalent(info) && IsOnlyMemberOfEquivalenceSet());
  // Unlink from the current (singleton) list.
  next_->prev_ = prev_;
  prev_->next_ = next_;
  // Splice into the new list immediately after |info|.
  next_ = info->next_;
  prev_ = info;
  prev_->next_ = this;
  next_->prev_ = this;
  set_equivalence_id(info->equivalence_id());
  // On joining a set the value lives in another member, so this
  // register is no longer materialized.
  set_materialized(false);
}
| 83 | |
| 84 void BytecodeRegisterOptimizer::RegisterInfo::MoveToNewEquivalenceSet( | |
| 85 uint32_t equivalence_id, bool materialized) { | |
| 86 next_->prev_ = prev_; | |
| 87 prev_->next_ = next_; | |
| 88 next_ = prev_ = this; | |
| 89 equivalence_id_ = equivalence_id; | |
| 90 materialized_ = materialized; | |
| 91 } | |
| 92 | |
| 93 bool BytecodeRegisterOptimizer::RegisterInfo::IsOnlyMemberOfEquivalenceSet() | |
| 94 const { | |
| 95 return this->next_ == this; | |
| 96 } | |
| 97 | |
| 98 bool BytecodeRegisterOptimizer::RegisterInfo:: | |
| 99 IsOnlyMaterializedMemberOfEquivalenceSet() const { | |
| 100 DCHECK(materialized()); | |
| 101 | |
| 102 const RegisterInfo* visitor = this->next_; | |
| 103 while (visitor != this) { | |
| 104 if (visitor->materialized()) { | |
| 105 return false; | |
| 106 } | |
| 107 visitor = visitor->next_; | |
| 108 } | |
| 109 return true; | |
| 110 } | |
| 111 | |
| 112 bool BytecodeRegisterOptimizer::RegisterInfo::IsEquivalent( | |
| 113 RegisterInfo* info) const { | |
| 114 return equivalence_id() == info->equivalence_id(); | |
| 115 } | |
| 116 | |
| 117 BytecodeRegisterOptimizer::RegisterInfo* | |
| 118 BytecodeRegisterOptimizer::RegisterInfo::GetMaterializedEquivalent() { | |
| 119 RegisterInfo* visitor = this; | |
| 120 do { | |
| 121 if (visitor->materialized()) { | |
| 122 return visitor; | |
| 123 } | |
| 124 visitor = visitor->next_; | |
| 125 } while (visitor != this); | |
| 126 | |
| 127 return nullptr; | |
| 128 } | |
| 129 | |
| 130 BytecodeRegisterOptimizer::RegisterInfo* | |
| 131 BytecodeRegisterOptimizer::RegisterInfo::GetMaterializedEquivalentOtherThan( | |
| 132 Register reg) { | |
| 133 RegisterInfo* visitor = this; | |
| 134 do { | |
| 135 if (visitor->materialized() && visitor->register_value() != reg) { | |
| 136 return visitor; | |
| 137 } | |
| 138 visitor = visitor->next_; | |
| 139 } while (visitor != this); | |
| 140 | |
| 141 return nullptr; | |
| 142 } | |
| 143 | |
| 144 BytecodeRegisterOptimizer::RegisterInfo* | |
| 145 BytecodeRegisterOptimizer::RegisterInfo::GetEquivalentToMaterialize() { | |
| 146 DCHECK(this->materialized()); | |
| 147 RegisterInfo* visitor = this->next_; | |
| 148 RegisterInfo* best_info = nullptr; | |
| 149 while (visitor != this) { | |
| 150 if (visitor->materialized()) { | |
| 151 return nullptr; | |
| 152 } | |
| 153 if (best_info == nullptr || | |
| 154 visitor->register_value() < best_info->register_value()) { | |
| 155 best_info = visitor; | |
| 156 } | |
| 157 visitor = visitor->next_; | |
| 158 } | |
| 159 return best_info; | |
| 160 } | |
| 161 | |
// Sets up register metadata for all parameters, locals, and the
// accumulator, and registers as an observer of temporary register
// allocation events.
BytecodeRegisterOptimizer::BytecodeRegisterOptimizer(
    Zone* zone, TemporaryRegisterAllocator* register_allocator,
    int parameter_count, BytecodePipelineStage* next_stage)
    : accumulator_(Register::virtual_accumulator()),
      temporary_base_(register_allocator->allocation_base()),
      register_info_table_(zone),
      equivalence_id_(0),
      next_stage_(next_stage),
      flushed_(false),
      zone_(zone) {
  // Observe temporary register free events so equivalence state can be
  // kept in sync (see TemporaryRegisterFreeEvent).
  register_allocator->set_observer(this);

  // Calculate offset so register index values can be mapped into
  // a vector of register metadata.
  if (parameter_count != 0) {
    register_info_table_offset_ =
        -Register::FromParameterIndex(0, parameter_count).index();
  } else {
    // TODO(oth): This path shouldn't be necessary in bytecode generated
    // from Javascript, but a set of tests do not include the JS receiver.
    register_info_table_offset_ = -accumulator_.index();
  }

  // Initialize register map for parameters, locals, and the
  // accumulator. These all start out materialized, each in its own
  // equivalence set.
  register_info_table_.resize(register_info_table_offset_ +
                              static_cast<size_t>(temporary_base_.index()));
  for (size_t i = 0; i < register_info_table_.size(); ++i) {
    register_info_table_[i] = new (zone) RegisterInfo(
        RegisterFromRegisterInfoTableIndex(i), NextEquivalenceId(), true);
    DCHECK_EQ(register_info_table_[i]->register_value().index(),
              RegisterFromRegisterInfoTableIndex(i).index());
  }
  accumulator_info_ = GetRegisterInfo(accumulator_);
  DCHECK(accumulator_info_->register_value() == accumulator_);
}
| 198 | |
| 199 void BytecodeRegisterOptimizer::FlushState() { | |
| 200 if (flushed_) { | |
| 201 return; | |
| 202 } | |
| 203 | |
| 204 // Materialize all live registers. | |
| 205 size_t count = register_info_table_.size(); | |
| 206 for (size_t i = 0; i < count; ++i) { | |
| 207 RegisterInfo* reg_info = register_info_table_[i]; | |
| 208 if (!reg_info->materialized() && | |
| 209 !reg_info->IsOnlyMemberOfEquivalenceSet()) { | |
| 210 DCHECK(RegisterIsTemporary(reg_info->register_value()) || | |
| 211 reg_info->register_value() == accumulator_); | |
| 212 Materialize(reg_info); | |
| 213 } | |
| 214 } | |
| 215 | |
| 216 // Break all existing equivalences. | |
| 217 for (size_t i = 0; i < count; ++i) { | |
| 218 RegisterInfo* reg_info = register_info_table_[i]; | |
| 219 if (!reg_info->IsOnlyMemberOfEquivalenceSet()) { | |
| 220 reg_info->MoveToNewEquivalenceSet(NextEquivalenceId(), true); | |
| 221 } | |
| 222 } | |
| 223 flushed_ = true; | |
| 224 } | |
| 225 | |
// override
void BytecodeRegisterOptimizer::FlushBasicBlock() {
  // Equivalence state does not carry across basic block boundaries, so
  // flush before forwarding to the next pipeline stage.
  FlushState();
  next_stage_->FlushBasicBlock();
}
| 231 | |
// override
size_t BytecodeRegisterOptimizer::FlushForOffset() {
  // Offsets are only meaningful once all elided transfers have been
  // emitted, so flush before querying the next stage.
  FlushState();
  return next_stage_->FlushForOffset();
}
| 237 | |
| 238 // override | |
| 239 void BytecodeRegisterOptimizer::Write(BytecodeNode* node) { | |
| 240 flushed_ = false; | |
| 241 | |
| 242 // | |
| 243 // Transfers with observable registers as the destination will be | |
| 244 // immediately materialized so the source position information will | |
| 245 // be ordered correctly. | |
| 246 // | |
| 247 // Transfers without observable destination registers will initially | |
| 248 // be emitted as Nop's with the source position. They may, or may | |
| 249 // not, be materialized by the optimizer. However, the source | |
| 250 // position is not lost and being attached to a Nop is fine as the | |
| 251 // destination register is not observable in the debugger. | |
| 252 // | |
| 253 switch (node->bytecode()) { | |
| 254 case Bytecode::kLdar: { | |
| 255 DoLdar(node); | |
| 256 return; | |
| 257 } | |
| 258 case Bytecode::kStar: { | |
| 259 DoStar(node); | |
| 260 return; | |
| 261 } | |
| 262 case Bytecode::kMov: { | |
| 263 DoMov(node); | |
| 264 return; | |
| 265 } | |
| 266 default: | |
| 267 break; | |
| 268 } | |
| 269 | |
| 270 if (Bytecodes::IsJump(node->bytecode()) || | |
| 271 node->bytecode() == Bytecode::kDebugger) { | |
| 272 // The debugger can manipulate locals and parameters, flush | |
| 273 // everything before handing over to it. Similarly, all state must | |
| 274 // be flushed before emitting a jump due to how bytecode offsets | |
| 275 // for jumps are evaluated. | |
| 276 FlushState(); | |
| 277 } | |
| 278 | |
| 279 PrepareOperands(node); | |
| 280 WriteToNextStage(node); | |
| 281 } | |
| 282 | |
// Forwards |node| unchanged to the next pipeline stage.
void BytecodeRegisterOptimizer::WriteToNextStage(BytecodeNode* node) const {
  next_stage_->Write(node);
}
| 286 | |
// Attaches |source_info| to |node| before forwarding it to the next
// pipeline stage.
void BytecodeRegisterOptimizer::WriteToNextStage(
    BytecodeNode* node, const BytecodeSourceInfo& source_info) const {
  node->source_info().Update(source_info);
  next_stage_->Write(node);
}
| 292 | |
| 293 void BytecodeRegisterOptimizer::OutputRegisterTransfer( | |
| 294 RegisterInfo* input_info, RegisterInfo* output_info, | |
| 295 const BytecodeSourceInfo& source_info) { | |
| 296 Register input = input_info->register_value(); | |
| 297 Register output = output_info->register_value(); | |
| 298 DCHECK_NE(input.index(), output.index()); | |
| 299 | |
| 300 if (input == accumulator_) { | |
| 301 uint32_t operand = static_cast<uint32_t>(output.ToOperand()); | |
| 302 OperandScale scale = Bytecodes::OperandSizesToScale(output.SizeOfOperand()); | |
| 303 BytecodeNode node(Bytecode::kStar, operand, scale); | |
| 304 WriteToNextStage(&node, source_info); | |
| 305 } else if (output == accumulator_) { | |
| 306 uint32_t operand = static_cast<uint32_t>(input.ToOperand()); | |
| 307 OperandScale scale = Bytecodes::OperandSizesToScale(input.SizeOfOperand()); | |
| 308 BytecodeNode node(Bytecode::kLdar, operand, scale); | |
| 309 WriteToNextStage(&node, source_info); | |
| 310 } else { | |
| 311 uint32_t operand0 = static_cast<uint32_t>(input.ToOperand()); | |
| 312 uint32_t operand1 = static_cast<uint32_t>(output.ToOperand()); | |
| 313 OperandScale scale = Bytecodes::OperandSizesToScale(input.SizeOfOperand(), | |
| 314 output.SizeOfOperand()); | |
| 315 BytecodeNode node(Bytecode::kMov, operand0, operand1, scale); | |
| 316 WriteToNextStage(&node, source_info); | |
| 317 } | |
| 318 output_info->set_materialized(true); | |
| 319 } | |
| 320 | |
| 321 void BytecodeRegisterOptimizer::CreateMaterializedEquivalentIfRequired( | |
| 322 RegisterInfo* info) { | |
| 323 if (info->materialized()) { | |
| 324 RegisterInfo* unmaterialized = info->GetEquivalentToMaterialize(); | |
| 325 if (unmaterialized) { | |
| 326 OutputRegisterTransfer(info, unmaterialized); | |
| 327 } | |
| 328 } | |
| 329 } | |
| 330 | |
| 331 BytecodeRegisterOptimizer::RegisterInfo* | |
| 332 BytecodeRegisterOptimizer::GetMaterializedEquivalent(RegisterInfo* info) { | |
| 333 return info->materialized() ? info : info->GetMaterializedEquivalent(); | |
| 334 } | |
| 335 | |
| 336 BytecodeRegisterOptimizer::RegisterInfo* | |
| 337 BytecodeRegisterOptimizer::GetMaterializedEquivalentNotAccumulator( | |
| 338 RegisterInfo* info) { | |
| 339 if (info->materialized()) { | |
| 340 return info; | |
| 341 } | |
| 342 | |
| 343 RegisterInfo* result = info->GetMaterializedEquivalentOtherThan(accumulator_); | |
| 344 if (result == nullptr) { | |
| 345 Materialize(info); | |
| 346 result = info; | |
| 347 } | |
| 348 DCHECK(result->register_value() != accumulator_); | |
| 349 return result; | |
| 350 } | |
| 351 | |
| 352 void BytecodeRegisterOptimizer::Materialize(RegisterInfo* info) { | |
| 353 if (!info->materialized()) { | |
| 354 RegisterInfo* materialized = info->GetMaterializedEquivalent(); | |
| 355 OutputRegisterTransfer(materialized, info); | |
| 356 } | |
| 357 } | |
| 358 | |
// Records that |output_info|'s register now holds the same value as
// |input_info|'s register. Emits bytecode only when the destination is
// observable; otherwise the transfer is tracked via equivalence sets
// and at most a Nop is emitted to preserve source position info.
void BytecodeRegisterOptimizer::RegisterTransfer(
    RegisterInfo* input_info, RegisterInfo* output_info,
    const BytecodeSourceInfo& source_info) {
  // Ensure value associated with output is materialized into a
  // different register if necessary.
  CreateMaterializedEquivalentIfRequired(output_info);
  bool output_is_observable =
      RegisterIsObservable(output_info->register_value());

  // If the output and input are not equivalent or the output is
  // visible to the user/debugger, update equivalence set and
  // potentially emit store if the transfer is observable.
  if (!output_info->IsEquivalent(input_info) || output_is_observable) {
    // Add to the equivalence set of |input_info|.
    RegisterInfo* materialized_info = input_info->GetMaterializedEquivalent();
    output_info->AddToEquivalenceSetOf(materialized_info);
    if (output_is_observable) {
      OutputRegisterTransfer(materialized_info, output_info, source_info);
      return;
    }
  }

  // The transfer was elided; keep its source position on a Nop so the
  // debugger still sees it in order.
  if (source_info.is_valid()) {
    EmitNopForSourceInfo(source_info);
  }
}
| 385 | |
// Emits a Nop carrying |source_info| so that source position data from
// an elided transfer is not lost.
void BytecodeRegisterOptimizer::EmitNopForSourceInfo(
    const BytecodeSourceInfo& source_info) const {
  BytecodeNode nop(Bytecode::kNop);
  nop.source_info().Update(source_info);
  WriteToNextStage(&nop);
}
| 392 | |
| 393 void BytecodeRegisterOptimizer::DoLdar(const BytecodeNode* const node) { | |
| 394 Register input = GetRegisterInputOperand( | |
| 395 0, node->bytecode(), node->operands(), node->operand_count()); | |
| 396 RegisterInfo* input_info = GetRegisterInfo(input); | |
| 397 RegisterTransfer(input_info, accumulator_info_, node->source_info()); | |
| 398 } | |
| 399 | |
| 400 void BytecodeRegisterOptimizer::DoMov(const BytecodeNode* const node) { | |
| 401 Register input = GetRegisterInputOperand( | |
| 402 0, node->bytecode(), node->operands(), node->operand_count()); | |
| 403 RegisterInfo* input_info = GetRegisterInfo(input); | |
| 404 Register output = GetRegisterOutputOperand( | |
| 405 1, node->bytecode(), node->operands(), node->operand_count()); | |
| 406 RegisterInfo* output_info = GetOrCreateRegisterInfo(output); | |
| 407 RegisterTransfer(input_info, output_info, node->source_info()); | |
| 408 } | |
| 409 | |
| 410 void BytecodeRegisterOptimizer::DoStar(const BytecodeNode* const node) { | |
| 411 Register output = GetRegisterOutputOperand( | |
| 412 0, node->bytecode(), node->operands(), node->operand_count()); | |
| 413 RegisterInfo* output_info = GetOrCreateRegisterInfo(output); | |
| 414 RegisterTransfer(accumulator_info_, output_info, node->source_info()); | |
| 415 } | |
| 416 | |
// Prepares |reg_info|'s register to be clobbered: preserves its current
// value in an equivalent register if required, then detaches it into a
// fresh, materialized singleton equivalence set.
void BytecodeRegisterOptimizer::PrepareRegisterOutputOperand(
    RegisterInfo* reg_info) {
  CreateMaterializedEquivalentIfRequired(reg_info);
  reg_info->MoveToNewEquivalenceSet(NextEquivalenceId(), true);
}
| 422 | |
| 423 void BytecodeRegisterOptimizer::PrepareRegisterRangeOutputOperand( | |
| 424 Register start, int count) { | |
| 425 for (int i = 0; i < count; ++i) { | |
| 426 Register reg(start.index() + i); | |
| 427 RegisterInfo* reg_info = GetOrCreateRegisterInfo(reg); | |
| 428 PrepareRegisterOutputOperand(reg_info); | |
| 429 } | |
| 430 } | |
| 431 | |
| 432 Register BytecodeRegisterOptimizer::GetEquivalentRegisterForInputOperand( | |
| 433 Register reg) { | |
| 434 // For a temporary register, RegInfo state may need be created. For | |
| 435 // locals and parameters, the RegInfo state is created in the | |
| 436 // BytecodeRegisterOptimizer constructor. | |
| 437 RegisterInfo* reg_info = GetOrCreateRegisterInfo(reg); | |
| 438 if (reg_info->materialized()) { | |
| 439 return reg; | |
| 440 } else { | |
| 441 RegisterInfo* equivalent_info = | |
| 442 GetMaterializedEquivalentNotAccumulator(reg_info); | |
| 443 return equivalent_info->register_value(); | |
| 444 } | |
| 445 } | |
| 446 | |
| 447 void BytecodeRegisterOptimizer::PrepareRegisterInputOperand( | |
| 448 BytecodeNode* const node, Register reg, int operand_index) { | |
| 449 Register equivalent = GetEquivalentRegisterForInputOperand(reg); | |
| 450 node->operands()[operand_index] = | |
| 451 static_cast<uint32_t>(equivalent.ToOperand()); | |
| 452 // Update operand scale as equivalent may be different. | |
| 453 OperandScale operand_scale = | |
| 454 Bytecodes::OperandSizesToScale(equivalent.SizeOfOperand()); | |
| 455 if (operand_scale > node->operand_scale()) { | |
| 456 node->set_operand_scale(operand_scale); | |
| 457 } | |
| 458 } | |
| 459 | |
| 460 void BytecodeRegisterOptimizer::PrepareRegisterRangeInputOperand(Register start, | |
| 461 int count) { | |
| 462 for (int i = 0; i < count; ++i) { | |
| 463 Register current(start.index() + i); | |
| 464 RegisterInfo* input_info = GetRegisterInfo(current); | |
| 465 Materialize(input_info); | |
| 466 } | |
| 467 } | |
| 468 | |
void BytecodeRegisterOptimizer::PrepareRegisterOperands(
    BytecodeNode* const node) {
  //
  // For each input operand, get a materialized equivalent if it is
  // just a single register, otherwise materialize register range.
  // Update operand_scale if necessary.
  //
  // For each output register about to be clobbered, materialize an
  // equivalent if it exists. Put each register in it's own equivalence set.
  //
  int register_operand_bitmap =
      Bytecodes::GetRegisterOperandBitmap(node->bytecode());
  const OperandType* operand_types =
      Bytecodes::GetOperandTypes(node->bytecode());
  uint32_t* operands = node->operands();
  // Each set bit in the bitmap marks an operand index that is a
  // register operand; the loop walks operand indices in order.
  for (int i = 0; register_operand_bitmap != 0;
       ++i, register_operand_bitmap >>= 1) {
    if ((register_operand_bitmap & 1) == 0) {
      continue;
    }
    OperandType operand_type = operand_types[i];
    int count = 0;
    // A register operand followed by a kRegCount operand describes a
    // variable-length range; the length comes from the next operand.
    if (operand_types[i + 1] == OperandType::kRegCount) {
      count = static_cast<int>(operands[i + 1]);
      if (count == 0) {
        continue;
      }
    } else {
      count = Bytecodes::GetNumberOfRegistersRepresentedBy(operand_type);
    }

    Register reg = Register::FromOperand(static_cast<int32_t>(operands[i]));
    if (Bytecodes::IsRegisterInputOperandType(operand_type)) {
      if (count == 1) {
        PrepareRegisterInputOperand(node, reg, i);
      } else if (count > 1) {
        PrepareRegisterRangeInputOperand(reg, count);
      }
    } else if (Bytecodes::IsRegisterOutputOperandType(operand_type)) {
      PrepareRegisterRangeOutputOperand(reg, count);
    }
  }
}
| 512 | |
| 513 void BytecodeRegisterOptimizer::PrepareAccumulator(BytecodeNode* const node) { | |
| 514 // Materialize the accumulator if it is read by the bytecode. The | |
| 515 // accumulator is special and no other register can be materialized | |
| 516 // in it's place. | |
| 517 if (Bytecodes::ReadsAccumulator(node->bytecode()) && | |
| 518 !accumulator_info_->materialized()) { | |
| 519 Materialize(accumulator_info_); | |
| 520 } | |
| 521 | |
| 522 // Materialize an equivalent to the accumulator if it will be | |
| 523 // clobbered when the bytecode is dispatched. | |
| 524 if (Bytecodes::WritesAccumulator(node->bytecode())) { | |
| 525 PrepareRegisterOutputOperand(accumulator_info_); | |
| 526 } | |
| 527 } | |
| 528 | |
// Prepares the accumulator and all register operands of |node| before
// it is written to the next stage.
void BytecodeRegisterOptimizer::PrepareOperands(BytecodeNode* const node) {
  PrepareAccumulator(node);
  PrepareRegisterOperands(node);
}
| 533 | |
// static
// Decodes the register input operand at |index| of |bytecode|.
Register BytecodeRegisterOptimizer::GetRegisterInputOperand(
    int index, Bytecode bytecode, const uint32_t* operands, int operand_count) {
  DCHECK_LT(index, operand_count);
  DCHECK(Bytecodes::IsRegisterInputOperandType(
      Bytecodes::GetOperandType(bytecode, index)));
  return OperandToRegister(operands[index]);
}
| 542 | |
// static
// Decodes the register output operand at |index| of |bytecode|.
Register BytecodeRegisterOptimizer::GetRegisterOutputOperand(
    int index, Bytecode bytecode, const uint32_t* operands, int operand_count) {
  DCHECK_LT(index, operand_count);
  DCHECK(Bytecodes::IsRegisterOutputOperandType(
      Bytecodes::GetOperandType(bytecode, index)));
  return OperandToRegister(operands[index]);
}
| 551 | |
| 552 BytecodeRegisterOptimizer::RegisterInfo* | |
| 553 BytecodeRegisterOptimizer::GetRegisterInfo(Register reg) { | |
| 554 size_t index = GetRegisterInfoTableIndex(reg); | |
| 555 return (index < register_info_table_.size()) ? register_info_table_[index] | |
| 556 : nullptr; | |
| 557 } | |
| 558 | |
| 559 BytecodeRegisterOptimizer::RegisterInfo* | |
| 560 BytecodeRegisterOptimizer::GetOrCreateRegisterInfo(Register reg) { | |
| 561 size_t index = GetRegisterInfoTableIndex(reg); | |
| 562 return index < register_info_table_.size() ? register_info_table_[index] | |
| 563 : NewRegisterInfo(reg); | |
| 564 } | |
| 565 | |
// Creates RegisterInfo state for |reg|, which must lie beyond the
// current end of the table, by growing the table to cover it.
BytecodeRegisterOptimizer::RegisterInfo*
BytecodeRegisterOptimizer::NewRegisterInfo(Register reg) {
  size_t index = GetRegisterInfoTableIndex(reg);
  DCHECK_GE(index, register_info_table_.size());
  GrowRegisterMap(reg);
  return register_info_table_[index];
}
| 573 | |
| 574 void BytecodeRegisterOptimizer::GrowRegisterMap(Register reg) { | |
|
rmcilroy
2016/05/27 09:18:58
GrowRegisterInfoTable
| |
| 575 DCHECK(RegisterIsTemporary(reg)); | |
| 576 size_t index = GetRegisterInfoTableIndex(reg); | |
| 577 DCHECK_GE(index, register_info_table_.size()); | |
| 578 size_t new_size = index + 1; | |
| 579 size_t old_size = register_info_table_.size(); | |
| 580 register_info_table_.resize(new_size); | |
| 581 for (size_t i = old_size; i < new_size; ++i) { | |
| 582 register_info_table_[i] = new (zone()) RegisterInfo( | |
| 583 RegisterFromRegisterInfoTableIndex(i), NextEquivalenceId(), false); | |
| 584 } | |
| 585 } | |
| 586 | |
| 587 void BytecodeRegisterOptimizer::TemporaryRegisterFreeEvent(Register reg) { | |
| 588 RegisterInfo* info = GetRegisterInfo(reg); | |
| 589 if (info != nullptr) { | |
| 590 // If register is materialized and part of equivalence set, make | |
| 591 // sure another member of the set holds the value before the | |
| 592 // temporary register is removed. | |
| 593 CreateMaterializedEquivalentIfRequired(info); | |
| 594 info->MoveToNewEquivalenceSet(kInvalidEquivalenceId, false); | |
| 595 } | |
| 596 } | |
| 597 | |
| 598 } // namespace interpreter | |
| 599 } // namespace internal | |
| 600 } // namespace v8 | |
| OLD | NEW |