// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/instruction-selector.h"

#include "src/compiler/instruction-selector-impl.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties-inl.h"

namespace v8 {
namespace internal {
namespace compiler {

InstructionSelector::InstructionSelector(InstructionSequence* sequence,
                                         SourcePositionTable* source_positions)
    : zone_(sequence->isolate()),
      sequence_(sequence),
      source_positions_(source_positions),
      current_block_(NULL),
      instructions_(InstructionDeque::allocator_type(zone())),
      used_(graph()->NodeCount(), false, BoolVector::allocator_type(zone())) {}


void InstructionSelector::SelectInstructions() {
  // Mark the inputs of all phis in loop headers as used.
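  // (A loop phi's back-edge inputs live in blocks that the reverse-RPO walk
  // below visits before the loop header itself, so they must be marked live
  // up front or IsUsed() would treat them as dead.)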
  BasicBlockVector* blocks = schedule()->rpo_order();
  for (BasicBlockVectorIter i = blocks->begin(); i != blocks->end(); ++i) {
    BasicBlock* block = *i;
    if (!block->IsLoopHeader()) continue;
    ASSERT_NE(0, block->PredecessorCount());
    ASSERT_NE(1, block->PredecessorCount());
    for (BasicBlock::const_iterator j = block->begin(); j != block->end();
         ++j) {
      Node* phi = *j;
      if (phi->opcode() != IrOpcode::kPhi) continue;

      // Mark all inputs as used.
      Node::Inputs inputs = phi->inputs();
      for (InputIter k = inputs.begin(); k != inputs.end(); ++k) {
        MarkAsUsed(*k);
      }
    }
  }

  // Visit each basic block in post order.
  for (BasicBlockVectorRIter i = blocks->rbegin(); i != blocks->rend(); ++i) {
    VisitBlock(*i);
  }

  // Schedule the selected instructions.
  for (BasicBlockVectorIter i = blocks->begin(); i != blocks->end(); ++i) {
    BasicBlock* block = *i;
    size_t end = block->code_end_;
    size_t start = block->code_start_;
    sequence()->StartBlock(block);
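    // Each block's instructions were collected in reverse order (see
    // VisitBlock), so code_start_ > code_end_ and walking the indices
    // backwards restores the intended program order.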
    while (start-- > end) {
      sequence()->AddInstruction(instructions_[start], block);
    }
    sequence()->EndBlock(block);
  }
}


Instruction* InstructionSelector::Emit(InstructionCode opcode,
                                       InstructionOperand* output,
                                       size_t temp_count,
                                       InstructionOperand** temps) {
  size_t output_count = output == NULL ? 0 : 1;
  return Emit(opcode, output_count, &output, 0, NULL, temp_count, temps);
}


Instruction* InstructionSelector::Emit(InstructionCode opcode,
                                       InstructionOperand* output,
                                       InstructionOperand* a, size_t temp_count,
                                       InstructionOperand** temps) {
  size_t output_count = output == NULL ? 0 : 1;
  return Emit(opcode, output_count, &output, 1, &a, temp_count, temps);
}


Instruction* InstructionSelector::Emit(InstructionCode opcode,
                                       InstructionOperand* output,
                                       InstructionOperand* a,
                                       InstructionOperand* b, size_t temp_count,
                                       InstructionOperand** temps) {
  size_t output_count = output == NULL ? 0 : 1;
  InstructionOperand* inputs[] = {a, b};
  size_t input_count = ARRAY_SIZE(inputs);
  return Emit(opcode, output_count, &output, input_count, inputs, temp_count,
              temps);
}


Instruction* InstructionSelector::Emit(InstructionCode opcode,
                                       InstructionOperand* output,
                                       InstructionOperand* a,
                                       InstructionOperand* b,
                                       InstructionOperand* c, size_t temp_count,
                                       InstructionOperand** temps) {
  size_t output_count = output == NULL ? 0 : 1;
  InstructionOperand* inputs[] = {a, b, c};
  size_t input_count = ARRAY_SIZE(inputs);
  return Emit(opcode, output_count, &output, input_count, inputs, temp_count,
              temps);
}


Instruction* InstructionSelector::Emit(
    InstructionCode opcode, InstructionOperand* output, InstructionOperand* a,
    InstructionOperand* b, InstructionOperand* c, InstructionOperand* d,
    size_t temp_count, InstructionOperand** temps) {
  size_t output_count = output == NULL ? 0 : 1;
  InstructionOperand* inputs[] = {a, b, c, d};
  size_t input_count = ARRAY_SIZE(inputs);
  return Emit(opcode, output_count, &output, input_count, inputs, temp_count,
              temps);
}


Instruction* InstructionSelector::Emit(
    InstructionCode opcode, size_t output_count, InstructionOperand** outputs,
    size_t input_count, InstructionOperand** inputs, size_t temp_count,
    InstructionOperand** temps) {
  Instruction* instr =
      Instruction::New(instruction_zone(), opcode, output_count, outputs,
                       input_count, inputs, temp_count, temps);
  return Emit(instr);
}


Instruction* InstructionSelector::Emit(Instruction* instr) {
  instructions_.push_back(instr);
  return instr;
}


bool InstructionSelector::IsNextInAssemblyOrder(const BasicBlock* block) const {
  return block->rpo_number_ == (current_block_->rpo_number_ + 1) &&
         block->deferred_ == current_block_->deferred_;
}


bool InstructionSelector::CanCover(Node* user, Node* node) const {
  return node->OwnedBy(user) &&
         schedule()->block(node) == schedule()->block(user);
}


bool InstructionSelector::IsUsed(Node* node) const {
  if (!node->op()->HasProperty(Operator::kEliminatable)) return true;
  NodeId id = node->id();
  ASSERT(id >= 0);
  ASSERT(id < static_cast<NodeId>(used_.size()));
  return used_[id];
}


void InstructionSelector::MarkAsUsed(Node* node) {
  ASSERT_NOT_NULL(node);
  NodeId id = node->id();
  ASSERT(id >= 0);
  ASSERT(id < static_cast<NodeId>(used_.size()));
  used_[id] = true;
}


bool InstructionSelector::IsDouble(const Node* node) const {
  ASSERT_NOT_NULL(node);
  return sequence()->IsDouble(node->id());
}


void InstructionSelector::MarkAsDouble(Node* node) {
  ASSERT_NOT_NULL(node);
  ASSERT(!IsReference(node));
  sequence()->MarkAsDouble(node->id());

  // Propagate "doubleness" through phis.
  for (UseIter i = node->uses().begin(); i != node->uses().end(); ++i) {
    Node* user = *i;
    if (user->opcode() != IrOpcode::kPhi) continue;
    if (IsDouble(user)) continue;
    MarkAsDouble(user);
  }
}


bool InstructionSelector::IsReference(const Node* node) const {
  ASSERT_NOT_NULL(node);
  return sequence()->IsReference(node->id());
}


void InstructionSelector::MarkAsReference(Node* node) {
  ASSERT_NOT_NULL(node);
  ASSERT(!IsDouble(node));
  sequence()->MarkAsReference(node->id());

  // Propagate "referenceness" through phis.
  for (UseIter i = node->uses().begin(); i != node->uses().end(); ++i) {
    Node* user = *i;
    if (user->opcode() != IrOpcode::kPhi) continue;
    if (IsReference(user)) continue;
    MarkAsReference(user);
  }
}


void InstructionSelector::MarkAsRepresentation(MachineRepresentation rep,
                                               Node* node) {
  ASSERT_NOT_NULL(node);
  if (rep == kMachineFloat64) MarkAsDouble(node);
  if (rep == kMachineTagged) MarkAsReference(node);
}


// TODO(bmeurer): Get rid of the CallBuffer business and make
// InstructionSelector::VisitCall platform independent instead.
CallBuffer::CallBuffer(Zone* zone, CallDescriptor* d)
    : output_count(0),
      descriptor(d),
      output_nodes(zone->NewArray<Node*>(d->ReturnCount())),
      outputs(zone->NewArray<InstructionOperand*>(d->ReturnCount())),
      fixed_and_control_args(
          zone->NewArray<InstructionOperand*>(input_count() + control_count())),
      fixed_count(0),
      pushed_nodes(zone->NewArray<Node*>(input_count())),
      pushed_count(0) {
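  // Zeroing output_nodes is only needed for multiple return values, since
  // InitializeCallBuffer overwrites the single-return slot unconditionally.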
  if (d->ReturnCount() > 1) {
    memset(output_nodes, 0, sizeof(Node*) * d->ReturnCount());  // NOLINT
  }
  memset(pushed_nodes, 0, sizeof(Node*) * input_count());  // NOLINT
}


// TODO(bmeurer): Get rid of the CallBuffer business and make
// InstructionSelector::VisitCall platform independent instead.
void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
                                               bool call_code_immediate,
                                               bool call_address_immediate,
                                               BasicBlock* cont_node,
                                               BasicBlock* deopt_node) {
  OperandGenerator g(this);
  ASSERT_EQ(call->op()->OutputCount(), buffer->descriptor->ReturnCount());
  ASSERT_EQ(NodeProperties::GetValueInputCount(call), buffer->input_count());

  if (buffer->descriptor->ReturnCount() > 0) {
    // Collect the projections that represent multiple outputs from this call.
    if (buffer->descriptor->ReturnCount() == 1) {
      buffer->output_nodes[0] = call;
    } else {
      // Iterate over all uses of {call} and collect the projections into the
      // {output_nodes} buffer.
      for (UseIter i = call->uses().begin(); i != call->uses().end(); ++i) {
        if ((*i)->opcode() == IrOpcode::kProjection) {
          int index = OpParameter<int32_t>(*i);
          ASSERT_GE(index, 0);
          ASSERT_LT(index, buffer->descriptor->ReturnCount());
          ASSERT_EQ(NULL, buffer->output_nodes[index]);
          buffer->output_nodes[index] = *i;
        }
      }
    }

    // Filter out the outputs that aren't live because no projection uses them.
    for (int i = 0; i < buffer->descriptor->ReturnCount(); i++) {
      if (buffer->output_nodes[i] != NULL) {
        Node* output = buffer->output_nodes[i];
        LinkageLocation location = buffer->descriptor->GetReturnLocation(i);
        MarkAsRepresentation(location.representation(), output);
        buffer->outputs[buffer->output_count++] =
            g.DefineAsLocation(output, location);
      }
    }
  }

  buffer->fixed_count = 1;  // First argument is always the callee.
  Node* callee = call->InputAt(0);
  switch (buffer->descriptor->kind()) {
    case CallDescriptor::kCallCodeObject:
      buffer->fixed_and_control_args[0] =
          (call_code_immediate && callee->opcode() == IrOpcode::kHeapConstant)
              ? g.UseImmediate(callee)
              : g.UseRegister(callee);
      break;
    case CallDescriptor::kCallAddress:
      buffer->fixed_and_control_args[0] =
          (call_address_immediate &&
           (callee->opcode() == IrOpcode::kInt32Constant ||
            callee->opcode() == IrOpcode::kInt64Constant))
              ? g.UseImmediate(callee)
              : g.UseRegister(callee);
      break;
    case CallDescriptor::kCallJSFunction:
      buffer->fixed_and_control_args[0] =
          g.UseLocation(callee, buffer->descriptor->GetInputLocation(0));
      break;
  }

  int input_count = buffer->input_count();

  // Split the arguments into pushed_nodes and fixed_args. Pushed arguments
  // require an explicit push instruction before the call and do not appear
  // as arguments to the call. Everything else ends up as an InstructionOperand
  // argument to the call.
  InputIter iter(call->inputs().begin());
  for (int index = 0; index < input_count; ++iter, ++index) {
    ASSERT(iter != call->inputs().end());
    ASSERT(index == iter.index());
    if (index == 0) continue;  // The first argument (callee) is already done.
    InstructionOperand* op =
        g.UseLocation(*iter, buffer->descriptor->GetInputLocation(index));
    if (UnallocatedOperand::cast(op)->HasFixedSlotPolicy()) {
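      // Stack slots have negative fixed_slot_index() values (-1, -2, ...);
      // negate and shift to obtain a zero-based index into {pushed_nodes}.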
      int stack_index = -UnallocatedOperand::cast(op)->fixed_slot_index() - 1;
      ASSERT(buffer->pushed_nodes[stack_index] == NULL);
      buffer->pushed_nodes[stack_index] = *iter;
      buffer->pushed_count++;
    } else {
      buffer->fixed_and_control_args[buffer->fixed_count] = op;
      buffer->fixed_count++;
    }
  }

  // If the call can deoptimize, we add the continuation and deoptimization
  // block labels.
  if (buffer->descriptor->CanLazilyDeoptimize()) {
    ASSERT(cont_node != NULL);
    ASSERT(deopt_node != NULL);
    buffer->fixed_and_control_args[buffer->fixed_count] = g.Label(cont_node);
    buffer->fixed_and_control_args[buffer->fixed_count + 1] =
        g.Label(deopt_node);
  } else {
    ASSERT(cont_node == NULL);
    ASSERT(deopt_node == NULL);
  }

  ASSERT(input_count == (buffer->fixed_count + buffer->pushed_count));
}


void InstructionSelector::VisitBlock(BasicBlock* block) {
  ASSERT_EQ(NULL, current_block_);
  current_block_ = block;
  size_t current_block_end = instructions_.size();

  // Generate code for the block control "top down", but schedule the code
  // "bottom up".
  VisitControl(block);
  std::reverse(instructions_.begin() + current_block_end, instructions_.end());

  // Visit code in reverse control flow order, because architecture-specific
  // matching may cover more than one node at a time.
  for (BasicBlock::reverse_iterator i = block->rbegin(); i != block->rend();
       ++i) {
    Node* node = *i;
    if (!IsUsed(node)) continue;
    // Generate code for this node "top down", but schedule the code "bottom
    // up".
    size_t current_node_end = instructions_.size();
    VisitNode(node);
    std::reverse(instructions_.begin() + current_node_end, instructions_.end());
  }

  // We're done with the block.
  // TODO(bmeurer): We should not mutate the schedule.
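  // Because the instructions were generated in reverse, code_end_ holds the
  // lower index and code_start_ the higher one; SelectInstructions() walks
  // the range backwards to restore the intended order.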
  block->code_end_ = current_block_end;
  block->code_start_ = instructions_.size();

  current_block_ = NULL;
}


static inline void CheckNoPhis(const BasicBlock* block) {
#ifdef DEBUG
  // Branch targets should not have phis.
  for (BasicBlock::const_iterator i = block->begin(); i != block->end(); ++i) {
    const Node* node = *i;
    CHECK_NE(IrOpcode::kPhi, node->opcode());
  }
#endif
}


void InstructionSelector::VisitControl(BasicBlock* block) {
  Node* input = block->control_input_;
  switch (block->control_) {
    case BasicBlockData::kGoto:
      return VisitGoto(block->SuccessorAt(0));
    case BasicBlockData::kBranch: {
      ASSERT_EQ(IrOpcode::kBranch, input->opcode());
      BasicBlock* tbranch = block->SuccessorAt(0);
      BasicBlock* fbranch = block->SuccessorAt(1);
      // SSA deconstruction requires targets of branches not to have phis.
      // Edge split form guarantees this property, but is more strict.
      CheckNoPhis(tbranch);
      CheckNoPhis(fbranch);
      if (tbranch == fbranch) return VisitGoto(tbranch);
      return VisitBranch(input, tbranch, fbranch);
    }
    case BasicBlockData::kReturn: {
      // If the control input is itself a Return node, use its value input.
      Node* value = (input != NULL && input->opcode() == IrOpcode::kReturn)
                        ? input->InputAt(0)
                        : input;
      return VisitReturn(value);
    }
    case BasicBlockData::kThrow:
      return VisitThrow(input);
    case BasicBlockData::kDeoptimize:
      return VisitDeoptimization(input);
    case BasicBlockData::kCall: {
      BasicBlock* deoptimization = block->SuccessorAt(0);
      BasicBlock* continuation = block->SuccessorAt(1);
      VisitCall(input, continuation, deoptimization);
      break;
    }
    case BasicBlockData::kNone: {
      // TODO(titzer): exit block doesn't have control.
      ASSERT(input == NULL);
      break;
    }
    default:
      UNREACHABLE();
      break;
  }
}


void InstructionSelector::VisitNode(Node* node) {
  ASSERT_NOT_NULL(schedule()->block(node));  // should only use scheduled nodes.
  SourcePosition source_position = source_positions_->GetSourcePosition(node);
  if (!source_position.IsUnknown()) {
    ASSERT(!source_position.IsInvalid());
    if (FLAG_turbo_source_positions || node->opcode() == IrOpcode::kCall) {
      Emit(SourcePositionInstruction::New(instruction_zone(), source_position));
    }
  }
  switch (node->opcode()) {
    case IrOpcode::kStart:
    case IrOpcode::kLoop:
    case IrOpcode::kEnd:
    case IrOpcode::kBranch:
    case IrOpcode::kIfTrue:
    case IrOpcode::kIfFalse:
    case IrOpcode::kEffectPhi:
    case IrOpcode::kMerge:
    case IrOpcode::kProjection:
    case IrOpcode::kLazyDeoptimization:
    case IrOpcode::kContinuation:
      // No code needed for these graph artifacts.
      return;
    case IrOpcode::kPhi:
      return VisitPhi(node);
    case IrOpcode::kParameter: {
      int index = OpParameter<int>(node);
      MachineRepresentation rep = linkage()
                                      ->GetIncomingDescriptor()
                                      ->GetInputLocation(index)
                                      .representation();
      MarkAsRepresentation(rep, node);
      return VisitParameter(node);
    }
    case IrOpcode::kInt32Constant:
    case IrOpcode::kInt64Constant:
    case IrOpcode::kExternalConstant:
      return VisitConstant(node);
    case IrOpcode::kFloat64Constant:
      return MarkAsDouble(node), VisitConstant(node);
    case IrOpcode::kHeapConstant:
    case IrOpcode::kNumberConstant:
      // TODO(turbofan): only mark non-smis as references.
      return MarkAsReference(node), VisitConstant(node);
    case IrOpcode::kCall:
      return VisitCall(node, NULL, NULL);
    case IrOpcode::kFrameState:
      // TODO(titzer): state nodes should be combined into their users.
      return;
    case IrOpcode::kLoad: {
      MachineRepresentation load_rep = OpParameter<MachineRepresentation>(node);
      MarkAsRepresentation(load_rep, node);
      return VisitLoad(node);
    }
    case IrOpcode::kStore:
      return VisitStore(node);
    case IrOpcode::kWord32And:
      return VisitWord32And(node);
    case IrOpcode::kWord32Or:
      return VisitWord32Or(node);
    case IrOpcode::kWord32Xor:
      return VisitWord32Xor(node);
    case IrOpcode::kWord32Shl:
      return VisitWord32Shl(node);
    case IrOpcode::kWord32Shr:
      return VisitWord32Shr(node);
    case IrOpcode::kWord32Sar:
      return VisitWord32Sar(node);
    case IrOpcode::kWord32Equal:
      return VisitWord32Equal(node);
    case IrOpcode::kWord64And:
      return VisitWord64And(node);
    case IrOpcode::kWord64Or:
      return VisitWord64Or(node);
    case IrOpcode::kWord64Xor:
      return VisitWord64Xor(node);
    case IrOpcode::kWord64Shl:
      return VisitWord64Shl(node);
    case IrOpcode::kWord64Shr:
      return VisitWord64Shr(node);
    case IrOpcode::kWord64Sar:
      return VisitWord64Sar(node);
    case IrOpcode::kWord64Equal:
      return VisitWord64Equal(node);
    case IrOpcode::kInt32Add:
      return VisitInt32Add(node);
    case IrOpcode::kInt32Sub:
      return VisitInt32Sub(node);
    case IrOpcode::kInt32Mul:
      return VisitInt32Mul(node);
    case IrOpcode::kInt32Div:
      return VisitInt32Div(node);
    case IrOpcode::kInt32UDiv:
      return VisitInt32UDiv(node);
    case IrOpcode::kInt32Mod:
      return VisitInt32Mod(node);
    case IrOpcode::kInt32UMod:
      return VisitInt32UMod(node);
    case IrOpcode::kInt32LessThan:
      return VisitInt32LessThan(node);
    case IrOpcode::kInt32LessThanOrEqual:
      return VisitInt32LessThanOrEqual(node);
    case IrOpcode::kUint32LessThan:
      return VisitUint32LessThan(node);
    case IrOpcode::kUint32LessThanOrEqual:
      return VisitUint32LessThanOrEqual(node);
    case IrOpcode::kInt64Add:
      return VisitInt64Add(node);
    case IrOpcode::kInt64Sub:
      return VisitInt64Sub(node);
    case IrOpcode::kInt64Mul:
      return VisitInt64Mul(node);
    case IrOpcode::kInt64Div:
      return VisitInt64Div(node);
    case IrOpcode::kInt64UDiv:
      return VisitInt64UDiv(node);
    case IrOpcode::kInt64Mod:
      return VisitInt64Mod(node);
    case IrOpcode::kInt64UMod:
      return VisitInt64UMod(node);
    case IrOpcode::kInt64LessThan:
      return VisitInt64LessThan(node);
    case IrOpcode::kInt64LessThanOrEqual:
      return VisitInt64LessThanOrEqual(node);
    case IrOpcode::kConvertInt32ToInt64:
      return VisitConvertInt32ToInt64(node);
    case IrOpcode::kConvertInt64ToInt32:
      return VisitConvertInt64ToInt32(node);
    case IrOpcode::kConvertInt32ToFloat64:
      return MarkAsDouble(node), VisitConvertInt32ToFloat64(node);
    case IrOpcode::kConvertFloat64ToInt32:
      return VisitConvertFloat64ToInt32(node);
    case IrOpcode::kFloat64Add:
      return MarkAsDouble(node), VisitFloat64Add(node);
    case IrOpcode::kFloat64Sub:
      return MarkAsDouble(node), VisitFloat64Sub(node);
    case IrOpcode::kFloat64Mul:
      return MarkAsDouble(node), VisitFloat64Mul(node);
    case IrOpcode::kFloat64Div:
      return MarkAsDouble(node), VisitFloat64Div(node);
    case IrOpcode::kFloat64Mod:
      return MarkAsDouble(node), VisitFloat64Mod(node);
    case IrOpcode::kFloat64Equal:
      return VisitFloat64Equal(node);
    case IrOpcode::kFloat64LessThan:
      return VisitFloat64LessThan(node);
    case IrOpcode::kFloat64LessThanOrEqual:
      return VisitFloat64LessThanOrEqual(node);
    default:
      V8_Fatal(__FILE__, __LINE__, "Unexpected operator #%d:%s @ node #%d",
               node->opcode(), node->op()->mnemonic(), node->id());
  }
}


void InstructionSelector::VisitWord32Equal(Node* node) {
  FlagsContinuation cont(kEqual, node);
  Int32BinopMatcher m(node);
  if (m.right().Is(0)) {
    return VisitWord32Test(m.left().node(), &cont);
  }
  VisitWord32Compare(node, &cont);
}


void InstructionSelector::VisitInt32LessThan(Node* node) {
  FlagsContinuation cont(kSignedLessThan, node);
  VisitWord32Compare(node, &cont);
}


void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
  FlagsContinuation cont(kSignedLessThanOrEqual, node);
  VisitWord32Compare(node, &cont);
}


void InstructionSelector::VisitUint32LessThan(Node* node) {
  FlagsContinuation cont(kUnsignedLessThan, node);
  VisitWord32Compare(node, &cont);
}


void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
  FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
  VisitWord32Compare(node, &cont);
}


void InstructionSelector::VisitWord64Equal(Node* node) {
  FlagsContinuation cont(kEqual, node);
  Int64BinopMatcher m(node);
  if (m.right().Is(0)) {
    return VisitWord64Test(m.left().node(), &cont);
  }
  VisitWord64Compare(node, &cont);
}


void InstructionSelector::VisitInt64LessThan(Node* node) {
  FlagsContinuation cont(kSignedLessThan, node);
  VisitWord64Compare(node, &cont);
}


void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
  FlagsContinuation cont(kSignedLessThanOrEqual, node);
  VisitWord64Compare(node, &cont);
}


void InstructionSelector::VisitFloat64Equal(Node* node) {
  FlagsContinuation cont(kUnorderedEqual, node);
  VisitFloat64Compare(node, &cont);
}


void InstructionSelector::VisitFloat64LessThan(Node* node) {
  FlagsContinuation cont(kUnorderedLessThan, node);
  VisitFloat64Compare(node, &cont);
}


void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
  FlagsContinuation cont(kUnorderedLessThanOrEqual, node);
  VisitFloat64Compare(node, &cont);
}

// 32-bit targets do not implement the following instructions.
#if V8_TARGET_ARCH_32_BIT

void InstructionSelector::VisitWord64And(Node* node) { UNIMPLEMENTED(); }


void InstructionSelector::VisitWord64Or(Node* node) { UNIMPLEMENTED(); }


void InstructionSelector::VisitWord64Xor(Node* node) { UNIMPLEMENTED(); }


void InstructionSelector::VisitWord64Shl(Node* node) { UNIMPLEMENTED(); }


void InstructionSelector::VisitWord64Shr(Node* node) { UNIMPLEMENTED(); }


void InstructionSelector::VisitWord64Sar(Node* node) { UNIMPLEMENTED(); }


void InstructionSelector::VisitInt64Add(Node* node) { UNIMPLEMENTED(); }


void InstructionSelector::VisitInt64Sub(Node* node) { UNIMPLEMENTED(); }


void InstructionSelector::VisitInt64Mul(Node* node) { UNIMPLEMENTED(); }


void InstructionSelector::VisitInt64Div(Node* node) { UNIMPLEMENTED(); }


void InstructionSelector::VisitInt64UDiv(Node* node) { UNIMPLEMENTED(); }


void InstructionSelector::VisitInt64Mod(Node* node) { UNIMPLEMENTED(); }


void InstructionSelector::VisitInt64UMod(Node* node) { UNIMPLEMENTED(); }


void InstructionSelector::VisitConvertInt64ToInt32(Node* node) {
  UNIMPLEMENTED();
}


void InstructionSelector::VisitConvertInt32ToInt64(Node* node) {
  UNIMPLEMENTED();
}


void InstructionSelector::VisitWord64Test(Node* node, FlagsContinuation* cont) {
  UNIMPLEMENTED();
}


void InstructionSelector::VisitWord64Compare(Node* node,
                                             FlagsContinuation* cont) {
  UNIMPLEMENTED();
}

#endif  // V8_TARGET_ARCH_32_BIT


void InstructionSelector::VisitPhi(Node* node) {
  // TODO(bmeurer): Emit a PhiInstruction here.
  for (InputIter i = node->inputs().begin(); i != node->inputs().end(); ++i) {
    MarkAsUsed(*i);
  }
}


void InstructionSelector::VisitParameter(Node* node) {
  OperandGenerator g(this);
  Emit(kArchNop, g.DefineAsLocation(node, linkage()->GetParameterLocation(
                                              OpParameter<int>(node))));
}


void InstructionSelector::VisitConstant(Node* node) {
  // We must emit a NOP here because every live range needs a defining
  // instruction in the register allocator.
  OperandGenerator g(this);
  Emit(kArchNop, g.DefineAsConstant(node));
}

void InstructionSelector::VisitGoto(BasicBlock* target) {
  if (IsNextInAssemblyOrder(target)) {
    // Fall through to the next block.
    Emit(kArchNop, NULL)->MarkAsControl();
  } else {
    // Emit an explicit jump, since the target is not next in assembly order.
    OperandGenerator g(this);
    Emit(kArchJmp, NULL, g.Label(target))->MarkAsControl();
  }
}


void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
                                      BasicBlock* fbranch) {
  OperandGenerator g(this);
  Node* user = branch;
  Node* value = branch->InputAt(0);

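  // The branch is initially lowered as a "not equal to zero" test of {value};
  // the combining logic below may overwrite the condition.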
  FlagsContinuation cont(kNotEqual, tbranch, fbranch);

  // If we can fall through to the true block, invert the branch.
  if (IsNextInAssemblyOrder(tbranch)) {
    cont.Negate();
    cont.SwapBlocks();
  }

  // Try to combine with comparisons against 0 by simply inverting the branch.
  while (CanCover(user, value)) {
    if (value->opcode() == IrOpcode::kWord32Equal) {
      Int32BinopMatcher m(value);
      if (m.right().Is(0)) {
        user = value;
        value = m.left().node();
        cont.Negate();
      } else {
        break;
      }
    } else if (value->opcode() == IrOpcode::kWord64Equal) {
      Int64BinopMatcher m(value);
      if (m.right().Is(0)) {
        user = value;
        value = m.left().node();
        cont.Negate();
      } else {
        break;
      }
    } else {
      break;
    }
  }

  // Try to combine the branch with a comparison.
  if (CanCover(user, value)) {
    switch (value->opcode()) {
      case IrOpcode::kWord32Equal:
        cont.OverwriteAndNegateIfEqual(kEqual);
        return VisitWord32Compare(value, &cont);
      case IrOpcode::kInt32LessThan:
        cont.OverwriteAndNegateIfEqual(kSignedLessThan);
        return VisitWord32Compare(value, &cont);
      case IrOpcode::kInt32LessThanOrEqual:
        cont.OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
        return VisitWord32Compare(value, &cont);
      case IrOpcode::kUint32LessThan:
        cont.OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitWord32Compare(value, &cont);
      case IrOpcode::kUint32LessThanOrEqual:
        cont.OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitWord32Compare(value, &cont);
      case IrOpcode::kWord64Equal:
        cont.OverwriteAndNegateIfEqual(kEqual);
        return VisitWord64Compare(value, &cont);
      case IrOpcode::kInt64LessThan:
        cont.OverwriteAndNegateIfEqual(kSignedLessThan);
        return VisitWord64Compare(value, &cont);
      case IrOpcode::kInt64LessThanOrEqual:
        cont.OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
        return VisitWord64Compare(value, &cont);
      case IrOpcode::kFloat64Equal:
        cont.OverwriteAndNegateIfEqual(kUnorderedEqual);
        return VisitFloat64Compare(value, &cont);
      case IrOpcode::kFloat64LessThan:
        cont.OverwriteAndNegateIfEqual(kUnorderedLessThan);
        return VisitFloat64Compare(value, &cont);
      case IrOpcode::kFloat64LessThanOrEqual:
        cont.OverwriteAndNegateIfEqual(kUnorderedLessThanOrEqual);
        return VisitFloat64Compare(value, &cont);
      default:
        break;
    }
  }

  // The branch could not be combined with a compare; emit a compare against 0.
  VisitWord32Test(value, &cont);
}


void InstructionSelector::VisitReturn(Node* value) {
  OperandGenerator g(this);
  if (value != NULL) {
    Emit(kArchRet, NULL, g.UseLocation(value, linkage()->GetReturnLocation()));
  } else {
    Emit(kArchRet, NULL);
  }
}


void InstructionSelector::VisitThrow(Node* value) {
  UNIMPLEMENTED();  // TODO(titzer)
}


void InstructionSelector::VisitDeoptimization(Node* deopt) {
  ASSERT(deopt->op()->opcode() == IrOpcode::kDeoptimize);
  Node* state = deopt->InputAt(0);
  ASSERT(state->op()->opcode() == IrOpcode::kFrameState);
  FrameStateDescriptor descriptor = OpParameter<FrameStateDescriptor>(state);
  // TODO(jarin) We should also add an instruction input for every input to
  // the framestate node (and recurse for the inlined framestates).
  int deoptimization_id = sequence()->AddDeoptimizationEntry(descriptor);
  Emit(kArchDeoptimize | MiscField::encode(deoptimization_id), NULL);
}

}  // namespace compiler
}  // namespace internal
}  // namespace v8