OLD | NEW |
(Empty) | |
| 1 // Copyright 2014 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. |
| 4 |
| 5 #include "src/compiler/instruction-selector-impl.h" |
| 6 #include "src/compiler/node-matchers.h" |
| 7 #include "src/compiler/node-properties.h" |
| 8 |
| 9 namespace v8 { |
| 10 namespace internal { |
| 11 namespace compiler { |
| 12 |
// Classes of immediate operand accepted by PPC instructions; checked by
// PPCOperandGenerator::CanBeImmediate() below.
enum ImmediateMode {
  kInt16Imm,               // signed 16-bit immediate
  kInt16Imm_Unsigned,      // unsigned 16-bit immediate
  kInt16Imm_Negate,        // negation fits in a signed 16-bit immediate
  kInt16Imm_4ByteAligned,  // signed 16-bit immediate, multiple of 4
  kShift32Imm,             // shift amount in [0, 32)
  kShift64Imm,             // shift amount in [0, 64)
  kNoImmediate             // operand must always be in a register
};
| 22 |
| 23 |
| 24 // Adds PPC-specific methods for generating operands. |
| 25 class PPCOperandGenerator FINAL : public OperandGenerator { |
| 26 public: |
| 27 explicit PPCOperandGenerator(InstructionSelector* selector) |
| 28 : OperandGenerator(selector) {} |
| 29 |
| 30 InstructionOperand UseOperand(Node* node, ImmediateMode mode) { |
| 31 if (CanBeImmediate(node, mode)) { |
| 32 return UseImmediate(node); |
| 33 } |
| 34 return UseRegister(node); |
| 35 } |
| 36 |
| 37 bool CanBeImmediate(Node* node, ImmediateMode mode) { |
| 38 int64_t value; |
| 39 if (node->opcode() == IrOpcode::kInt32Constant) |
| 40 value = OpParameter<int32_t>(node); |
| 41 else if (node->opcode() == IrOpcode::kInt64Constant) |
| 42 value = OpParameter<int64_t>(node); |
| 43 else |
| 44 return false; |
| 45 return CanBeImmediate(value, mode); |
| 46 } |
| 47 |
| 48 bool CanBeImmediate(int64_t value, ImmediateMode mode) { |
| 49 switch (mode) { |
| 50 case kInt16Imm: |
| 51 return is_int16(value); |
| 52 case kInt16Imm_Unsigned: |
| 53 return is_uint16(value); |
| 54 case kInt16Imm_Negate: |
| 55 return is_int16(-value); |
| 56 case kInt16Imm_4ByteAligned: |
| 57 return is_int16(value) && !(value & 3); |
| 58 case kShift32Imm: |
| 59 return 0 <= value && value < 32; |
| 60 case kShift64Imm: |
| 61 return 0 <= value && value < 64; |
| 62 case kNoImmediate: |
| 63 return false; |
| 64 } |
| 65 return false; |
| 66 } |
| 67 }; |
| 68 |
| 69 |
| 70 static void VisitRRFloat64(InstructionSelector* selector, ArchOpcode opcode, |
| 71 Node* node) { |
| 72 PPCOperandGenerator g(selector); |
| 73 selector->Emit(opcode, g.DefineAsRegister(node), |
| 74 g.UseRegister(node->InputAt(0))); |
| 75 } |
| 76 |
| 77 |
| 78 static void VisitRRR(InstructionSelector* selector, Node* node, |
| 79 ArchOpcode opcode) { |
| 80 PPCOperandGenerator g(selector); |
| 81 selector->Emit(opcode, g.DefineAsRegister(node), |
| 82 g.UseRegister(node->InputAt(0)), |
| 83 g.UseRegister(node->InputAt(1))); |
| 84 } |
| 85 |
| 86 |
| 87 static void VisitRRRFloat64(InstructionSelector* selector, Node* node, |
| 88 ArchOpcode opcode) { |
| 89 PPCOperandGenerator g(selector); |
| 90 selector->Emit(opcode, g.DefineAsRegister(node), |
| 91 g.UseRegister(node->InputAt(0)), |
| 92 g.UseRegister(node->InputAt(1))); |
| 93 } |
| 94 |
| 95 |
| 96 static void VisitRRO(InstructionSelector* selector, Node* node, |
| 97 ArchOpcode opcode, ImmediateMode operand_mode) { |
| 98 PPCOperandGenerator g(selector); |
| 99 selector->Emit(opcode, g.DefineAsRegister(node), |
| 100 g.UseRegister(node->InputAt(0)), |
| 101 g.UseOperand(node->InputAt(1), operand_mode)); |
| 102 } |
| 103 |
| 104 |
// Shared routine for multiple binary operations.
// Emits |opcode| with a register left operand and a register-or-immediate
// right operand (per |operand_mode|), combined with the compare/branch/set
// behavior described by |cont|.
template <typename Matcher>
static void VisitBinop(InstructionSelector* selector, Node* node,
                       InstructionCode opcode, ImmediateMode operand_mode,
                       FlagsContinuation* cont) {
  PPCOperandGenerator g(selector);
  Matcher m(node);
  InstructionOperand inputs[4];
  size_t input_count = 0;
  InstructionOperand outputs[2];
  size_t output_count = 0;

  inputs[input_count++] = g.UseRegister(m.left().node());
  inputs[input_count++] = g.UseOperand(m.right().node(), operand_mode);

  // A branching continuation consumes the true/false block labels as two
  // extra inputs.
  if (cont->IsBranch()) {
    inputs[input_count++] = g.Label(cont->true_block());
    inputs[input_count++] = g.Label(cont->false_block());
  }

  outputs[output_count++] = g.DefineAsRegister(node);
  // A materializing ("set") continuation produces the flag result in a
  // second output register.
  if (cont->IsSet()) {
    outputs[output_count++] = g.DefineAsRegister(cont->result());
  }

  DCHECK_NE(0u, input_count);
  DCHECK_NE(0u, output_count);
  DCHECK_GE(arraysize(inputs), input_count);
  DCHECK_GE(arraysize(outputs), output_count);

  Instruction* instr = selector->Emit(cont->Encode(opcode), output_count,
                                      outputs, input_count, inputs);
  // Branches terminate the basic block.
  if (cont->IsBranch()) instr->MarkAsControl();
}
| 139 |
| 140 |
// Shared routine for multiple binary operations.
// Convenience overload: emits |opcode| with an empty (no-op) flags
// continuation.
template <typename Matcher>
static void VisitBinop(InstructionSelector* selector, Node* node,
                       ArchOpcode opcode, ImmediateMode operand_mode) {
  FlagsContinuation cont;
  VisitBinop<Matcher>(selector, node, opcode, operand_mode, &cont);
}
| 148 |
| 149 |
// Selects a load instruction by representation, preferring the immediate
// (MRI) addressing mode when either base or offset is an encodable constant
// and falling back to register-register (MRR) otherwise.
void InstructionSelector::VisitLoad(Node* node) {
  MachineType rep = RepresentationOf(OpParameter<LoadRepresentation>(node));
  MachineType typ = TypeOf(OpParameter<LoadRepresentation>(node));
  PPCOperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* offset = node->InputAt(1);

  ArchOpcode opcode;
  ImmediateMode mode = kInt16Imm;
  switch (rep) {
    case kRepFloat32:
      opcode = kPPC_LoadFloat32;
      break;
    case kRepFloat64:
      opcode = kPPC_LoadFloat64;
      break;
    case kRepBit:  // Fall through.
    case kRepWord8:
      // Sign- or zero-extending sub-word load depending on the value type.
      opcode = (typ == kTypeInt32) ? kPPC_LoadWordS8 : kPPC_LoadWordU8;
      break;
    case kRepWord16:
      opcode = (typ == kTypeInt32) ? kPPC_LoadWordS16 : kPPC_LoadWordU16;
      break;
#if !V8_TARGET_ARCH_PPC64
    case kRepTagged:  // Fall through.
#endif
    case kRepWord32:
      opcode = kPPC_LoadWordS32;
#if V8_TARGET_ARCH_PPC64
      // TODO(mbrandy): this applies to signed loads only (lwa)
      mode = kInt16Imm_4ByteAligned;
#endif
      break;
#if V8_TARGET_ARCH_PPC64
    case kRepTagged:  // Fall through.
    case kRepWord64:
      opcode = kPPC_LoadWord64;
      // 64-bit loads use the 4-byte-aligned immediate class.
      mode = kInt16Imm_4ByteAligned;
      break;
#endif
    default:
      UNREACHABLE();
      return;
  }
  // Base and offset are interchangeable; try either as the immediate.
  if (g.CanBeImmediate(offset, mode)) {
    Emit(opcode | AddressingModeField::encode(kMode_MRI),
         g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(offset));
  } else if (g.CanBeImmediate(base, mode)) {
    Emit(opcode | AddressingModeField::encode(kMode_MRI),
         g.DefineAsRegister(node), g.UseRegister(offset), g.UseImmediate(base));
  } else {
    Emit(opcode | AddressingModeField::encode(kMode_MRR),
         g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(offset));
  }
}
| 205 |
| 206 |
// Selects a store instruction. Tagged stores requiring a full write barrier
// are lowered to kPPC_StoreWriteBarrier with fixed registers; all other
// stores pick an opcode by representation and prefer the immediate (MRI)
// addressing mode when base or offset is an encodable constant.
void InstructionSelector::VisitStore(Node* node) {
  PPCOperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* offset = node->InputAt(1);
  Node* value = node->InputAt(2);

  StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node);
  MachineType rep = RepresentationOf(store_rep.machine_type());
  if (store_rep.write_barrier_kind() == kFullWriteBarrier) {
    DCHECK(rep == kRepTagged);
    // TODO(dcarney): refactor RecordWrite function to take temp registers
    // and pass them here instead of using fixed regs
    // TODO(dcarney): handle immediate indices.
    InstructionOperand temps[] = {g.TempRegister(r8), g.TempRegister(r9)};
    Emit(kPPC_StoreWriteBarrier, g.NoOutput(), g.UseFixed(base, r7),
         g.UseFixed(offset, r8), g.UseFixed(value, r9), arraysize(temps),
         temps);
    return;
  }
  DCHECK_EQ(kNoWriteBarrier, store_rep.write_barrier_kind());
  ArchOpcode opcode;
  ImmediateMode mode = kInt16Imm;
  switch (rep) {
    case kRepFloat32:
      opcode = kPPC_StoreFloat32;
      break;
    case kRepFloat64:
      opcode = kPPC_StoreFloat64;
      break;
    case kRepBit:  // Fall through.
    case kRepWord8:
      opcode = kPPC_StoreWord8;
      break;
    case kRepWord16:
      opcode = kPPC_StoreWord16;
      break;
#if !V8_TARGET_ARCH_PPC64
    case kRepTagged:  // Fall through.
#endif
    case kRepWord32:
      opcode = kPPC_StoreWord32;
      break;
#if V8_TARGET_ARCH_PPC64
    case kRepTagged:  // Fall through.
    case kRepWord64:
      opcode = kPPC_StoreWord64;
      // 64-bit stores use the 4-byte-aligned immediate class.
      mode = kInt16Imm_4ByteAligned;
      break;
#endif
    default:
      UNREACHABLE();
      return;
  }
  // Base and offset are interchangeable; try either as the immediate.
  if (g.CanBeImmediate(offset, mode)) {
    Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
         g.UseRegister(base), g.UseImmediate(offset), g.UseRegister(value));
  } else if (g.CanBeImmediate(base, mode)) {
    Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
         g.UseRegister(offset), g.UseImmediate(base), g.UseRegister(value));
  } else {
    Emit(opcode | AddressingModeField::encode(kMode_MRR), g.NoOutput(),
         g.UseRegister(base), g.UseRegister(offset), g.UseRegister(value));
  }
}
| 271 |
| 272 |
| 273 void InstructionSelector::VisitCheckedLoad(Node* node) { |
| 274 MachineType rep = RepresentationOf(OpParameter<MachineType>(node)); |
| 275 MachineType typ = TypeOf(OpParameter<MachineType>(node)); |
| 276 PPCOperandGenerator g(this); |
| 277 Node* const base = node->InputAt(0); |
| 278 Node* const offset = node->InputAt(1); |
| 279 Node* const length = node->InputAt(2); |
| 280 ArchOpcode opcode; |
| 281 switch (rep) { |
| 282 case kRepWord8: |
| 283 opcode = typ == kTypeInt32 ? kCheckedLoadInt8 : kCheckedLoadUint8; |
| 284 break; |
| 285 case kRepWord16: |
| 286 opcode = typ == kTypeInt32 ? kCheckedLoadInt16 : kCheckedLoadUint16; |
| 287 break; |
| 288 case kRepWord32: |
| 289 opcode = kCheckedLoadWord32; |
| 290 break; |
| 291 case kRepFloat32: |
| 292 opcode = kCheckedLoadFloat32; |
| 293 break; |
| 294 case kRepFloat64: |
| 295 opcode = kCheckedLoadFloat64; |
| 296 break; |
| 297 default: |
| 298 UNREACHABLE(); |
| 299 return; |
| 300 } |
| 301 AddressingMode addressingMode = kMode_MRR; |
| 302 Emit(opcode | AddressingModeField::encode(addressingMode), |
| 303 g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(offset), |
| 304 g.UseOperand(length, kInt16Imm_Unsigned)); |
| 305 } |
| 306 |
| 307 |
| 308 void InstructionSelector::VisitCheckedStore(Node* node) { |
| 309 MachineType rep = RepresentationOf(OpParameter<MachineType>(node)); |
| 310 PPCOperandGenerator g(this); |
| 311 Node* const base = node->InputAt(0); |
| 312 Node* const offset = node->InputAt(1); |
| 313 Node* const length = node->InputAt(2); |
| 314 Node* const value = node->InputAt(3); |
| 315 ArchOpcode opcode; |
| 316 switch (rep) { |
| 317 case kRepWord8: |
| 318 opcode = kCheckedStoreWord8; |
| 319 break; |
| 320 case kRepWord16: |
| 321 opcode = kCheckedStoreWord16; |
| 322 break; |
| 323 case kRepWord32: |
| 324 opcode = kCheckedStoreWord32; |
| 325 break; |
| 326 case kRepFloat32: |
| 327 opcode = kCheckedStoreFloat32; |
| 328 break; |
| 329 case kRepFloat64: |
| 330 opcode = kCheckedStoreFloat64; |
| 331 break; |
| 332 default: |
| 333 UNREACHABLE(); |
| 334 return; |
| 335 } |
| 336 AddressingMode addressingMode = kMode_MRR; |
| 337 Emit(opcode | AddressingModeField::encode(addressingMode), g.NoOutput(), |
| 338 g.UseRegister(base), g.UseRegister(offset), |
| 339 g.UseOperand(length, kInt16Imm_Unsigned), g.UseRegister(value)); |
| 340 } |
| 341 |
| 342 |
// Shared routine for And/Or with complement fusion: when one operand is
// Xor(x, -1) (i.e. ~x) and the selector can cover that xor (per
// |left_can_cover|/|right_can_cover|), emit the complement form of |opcode|
// with x directly, avoiding a separate NOT instruction.
template <typename Matcher>
static void VisitLogical(InstructionSelector* selector, Node* node, Matcher* m,
                         ArchOpcode opcode, bool left_can_cover,
                         bool right_can_cover, ImmediateMode imm_mode) {
  PPCOperandGenerator g(selector);

  // Map instruction to equivalent operation with inverted right input.
  ArchOpcode inv_opcode = opcode;
  switch (opcode) {
    case kPPC_And32:
      inv_opcode = kPPC_AndComplement32;
      break;
    case kPPC_And64:
      inv_opcode = kPPC_AndComplement64;
      break;
    case kPPC_Or32:
      inv_opcode = kPPC_OrComplement32;
      break;
    case kPPC_Or64:
      inv_opcode = kPPC_OrComplement64;
      break;
    default:
      UNREACHABLE();
  }

  // Select Logical(y, ~x) for Logical(Xor(x, -1), y).
  // Note the operands are swapped: the non-inverted operand goes first.
  if ((m->left().IsWord32Xor() || m->left().IsWord64Xor()) && left_can_cover) {
    Matcher mleft(m->left().node());
    if (mleft.right().Is(-1)) {
      selector->Emit(inv_opcode, g.DefineAsRegister(node),
                     g.UseRegister(m->right().node()),
                     g.UseRegister(mleft.left().node()));
      return;
    }
  }

  // Select Logical(x, ~y) for Logical(x, Xor(y, -1)).
  if ((m->right().IsWord32Xor() || m->right().IsWord64Xor()) &&
      right_can_cover) {
    Matcher mright(m->right().node());
    if (mright.right().Is(-1)) {
      // TODO(all): support shifted operand on right.
      selector->Emit(inv_opcode, g.DefineAsRegister(node),
                     g.UseRegister(m->left().node()),
                     g.UseRegister(mright.left().node()));
      return;
    }
  }

  // No fusion possible; emit the plain logical op.
  VisitBinop<Matcher>(selector, node, opcode, imm_mode);
}
| 394 |
| 395 |
| 396 static inline bool IsContiguousMask32(uint32_t value, int* mb, int* me) { |
| 397 int mask_width = base::bits::CountPopulation32(value); |
| 398 int mask_msb = base::bits::CountLeadingZeros32(value); |
| 399 int mask_lsb = base::bits::CountTrailingZeros32(value); |
| 400 if ((mask_width == 0) || (mask_msb + mask_width + mask_lsb != 32)) |
| 401 return false; |
| 402 *mb = mask_lsb + mask_width - 1; |
| 403 *me = mask_lsb; |
| 404 return true; |
| 405 } |
| 406 |
| 407 |
#if V8_TARGET_ARCH_PPC64
// 64-bit variant of IsContiguousMask32: true if |value| is a single
// contiguous run of 1 bits; *mb/*me receive the run's MSB/LSB positions.
static inline bool IsContiguousMask64(uint64_t value, int* mb, int* me) {
  const int width = base::bits::CountPopulation64(value);
  const int leading = base::bits::CountLeadingZeros64(value);
  const int trailing = base::bits::CountTrailingZeros64(value);
  // Contiguous iff the zeros on both ends plus the ones account for all bits.
  if (width == 0 || leading + width + trailing != 64) return false;
  *mb = trailing + width - 1;
  *me = trailing;
  return true;
}
#endif
| 420 |
| 421 |
// TODO(mbrandy): Absorb rotate-right into rlwinm?
// Matches And(x, <contiguous mask>), optionally absorbing a covered
// left/right shift of x, into a single rotate-left-and-mask instruction;
// otherwise falls back to the generic logical visitor.
void InstructionSelector::VisitWord32And(Node* node) {
  PPCOperandGenerator g(this);
  Int32BinopMatcher m(node);
  int mb;
  int me;
  if (m.right().HasValue() && IsContiguousMask32(m.right().Value(), &mb, &me)) {
    int sh = 0;
    Node* left = m.left().node();
    if ((m.left().IsWord32Shr() || m.left().IsWord32Shl()) &&
        CanCover(node, left)) {
      // Try to absorb left/right shift into rlwinm
      Int32BinopMatcher mleft(m.left().node());
      if (mleft.right().IsInRange(0, 31)) {
        left = mleft.left().node();
        sh = mleft.right().Value();
        if (m.left().IsWord32Shr()) {
          // Adjust the mask such that it doesn't include any rotated bits.
          if (mb > 31 - sh) mb = 31 - sh;
          // A logical right shift by sh is a rotate left by (32 - sh).
          sh = (32 - sh) & 0x1f;
        } else {
          // Adjust the mask such that it doesn't include any rotated bits.
          if (me < sh) me = sh;
        }
      }
    }
    // Only emit when the (possibly adjusted) mask is still non-wrapping.
    if (mb >= me) {
      Emit(kPPC_RotLeftAndMask32, g.DefineAsRegister(node), g.UseRegister(left),
           g.TempImmediate(sh), g.TempImmediate(mb), g.TempImmediate(me));
      return;
    }
  }
  VisitLogical<Int32BinopMatcher>(
      this, node, &m, kPPC_And32, CanCover(node, m.left().node()),
      CanCover(node, m.right().node()), kInt16Imm_Unsigned);
}
| 458 |
| 459 |
#if V8_TARGET_ARCH_PPC64
// TODO(mbrandy): Absorb rotate-right into rldic?
// 64-bit analogue of VisitWord32And: matches And(x, <contiguous mask>),
// optionally absorbing a covered left/right shift, into one of the
// rotate-and-clear forms; otherwise falls back to the logical visitor.
void InstructionSelector::VisitWord64And(Node* node) {
  PPCOperandGenerator g(this);
  Int64BinopMatcher m(node);
  int mb;
  int me;
  if (m.right().HasValue() && IsContiguousMask64(m.right().Value(), &mb, &me)) {
    int sh = 0;
    Node* left = m.left().node();
    if ((m.left().IsWord64Shr() || m.left().IsWord64Shl()) &&
        CanCover(node, left)) {
      // Try to absorb left/right shift into rldic
      Int64BinopMatcher mleft(m.left().node());
      if (mleft.right().IsInRange(0, 63)) {
        left = mleft.left().node();
        sh = mleft.right().Value();
        if (m.left().IsWord64Shr()) {
          // Adjust the mask such that it doesn't include any rotated bits.
          if (mb > 63 - sh) mb = 63 - sh;
          // A logical right shift by sh is a rotate left by (64 - sh).
          sh = (64 - sh) & 0x3f;
        } else {
          // Adjust the mask such that it doesn't include any rotated bits.
          if (me < sh) me = sh;
        }
      }
    }
    if (mb >= me) {
      // Unlike the 32-bit case there is no single rotate-and-mask form;
      // pick among the clear-left / clear-right / clear variants.
      bool match = false;
      ArchOpcode opcode;
      int mask;
      if (me == 0) {
        // Mask reaches down to bit 0: clear the high bits above mb.
        match = true;
        opcode = kPPC_RotLeftAndClearLeft64;
        mask = mb;
      } else if (mb == 63) {
        // Mask reaches up to bit 63: clear the low bits below me.
        match = true;
        opcode = kPPC_RotLeftAndClearRight64;
        mask = me;
      } else if (sh && me <= sh && m.left().IsWord64Shl()) {
        // Interior mask, representable when the shift vacated the low bits.
        match = true;
        opcode = kPPC_RotLeftAndClear64;
        mask = mb;
      }
      if (match) {
        Emit(opcode, g.DefineAsRegister(node), g.UseRegister(left),
             g.TempImmediate(sh), g.TempImmediate(mask));
        return;
      }
    }
  }
  VisitLogical<Int64BinopMatcher>(
      this, node, &m, kPPC_And64, CanCover(node, m.left().node()),
      CanCover(node, m.right().node()), kInt16Imm_Unsigned);
}
#endif
| 516 |
| 517 |
// 32-bit OR; VisitLogical handles fusion of an inverted operand.
void InstructionSelector::VisitWord32Or(Node* node) {
  Int32BinopMatcher m(node);
  VisitLogical<Int32BinopMatcher>(
      this, node, &m, kPPC_Or32, CanCover(node, m.left().node()),
      CanCover(node, m.right().node()), kInt16Imm_Unsigned);
}
| 524 |
| 525 |
#if V8_TARGET_ARCH_PPC64
// 64-bit OR; VisitLogical handles fusion of an inverted operand.
void InstructionSelector::VisitWord64Or(Node* node) {
  Int64BinopMatcher m(node);
  VisitLogical<Int64BinopMatcher>(
      this, node, &m, kPPC_Or64, CanCover(node, m.left().node()),
      CanCover(node, m.right().node()), kInt16Imm_Unsigned);
}
#endif
| 534 |
| 535 |
| 536 void InstructionSelector::VisitWord32Xor(Node* node) { |
| 537 PPCOperandGenerator g(this); |
| 538 Int32BinopMatcher m(node); |
| 539 if (m.right().Is(-1)) { |
| 540 Emit(kPPC_Not32, g.DefineAsRegister(node), g.UseRegister(m.left().node())); |
| 541 } else { |
| 542 VisitBinop<Int32BinopMatcher>(this, node, kPPC_Xor32, kInt16Imm_Unsigned); |
| 543 } |
| 544 } |
| 545 |
| 546 |
#if V8_TARGET_ARCH_PPC64
// 64-bit XOR; Xor(x, -1) is strength-reduced to a NOT instruction.
void InstructionSelector::VisitWord64Xor(Node* node) {
  PPCOperandGenerator g(this);
  Int64BinopMatcher m(node);
  if (!m.right().Is(-1)) {
    VisitBinop<Int64BinopMatcher>(this, node, kPPC_Xor64, kInt16Imm_Unsigned);
    return;
  }
  Emit(kPPC_Not64, g.DefineAsRegister(node), g.UseRegister(m.left().node()));
}
#endif
| 558 |
| 559 |
// 32-bit left shift; absorbs And(x, <contiguous mask>) << sh into a single
// rotate-left-and-mask when the shifted mask stays contiguous.
void InstructionSelector::VisitWord32Shl(Node* node) {
  PPCOperandGenerator g(this);
  Int32BinopMatcher m(node);
  if (m.left().IsWord32And() && m.right().IsInRange(0, 31)) {
    // Try to absorb logical-and into rlwinm
    Int32BinopMatcher mleft(m.left().node());
    int sh = m.right().Value();
    int mb;
    int me;
    if (mleft.right().HasValue() &&
        IsContiguousMask32(mleft.right().Value() << sh, &mb, &me)) {
      // Adjust the mask such that it doesn't include any rotated bits.
      if (me < sh) me = sh;
      if (mb >= me) {
        Emit(kPPC_RotLeftAndMask32, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()), g.TempImmediate(sh),
             g.TempImmediate(mb), g.TempImmediate(me));
        return;
      }
    }
  }
  // Generic shift; amount may be a register or a 5-bit immediate.
  VisitRRO(this, node, kPPC_ShiftLeft32, kShift32Imm);
}
| 583 |
| 584 |
#if V8_TARGET_ARCH_PPC64
// 64-bit left shift; absorbs And(x, <contiguous mask>) << sh into one of
// the rotate-and-clear forms when the shifted mask stays contiguous.
void InstructionSelector::VisitWord64Shl(Node* node) {
  PPCOperandGenerator g(this);
  Int64BinopMatcher m(node);
  // TODO(mbrandy): eliminate left sign extension if right >= 32
  if (m.left().IsWord64And() && m.right().IsInRange(0, 63)) {
    // Try to absorb logical-and into rldic
    Int64BinopMatcher mleft(m.left().node());
    int sh = m.right().Value();
    int mb;
    int me;
    if (mleft.right().HasValue() &&
        IsContiguousMask64(mleft.right().Value() << sh, &mb, &me)) {
      // Adjust the mask such that it doesn't include any rotated bits.
      if (me < sh) me = sh;
      if (mb >= me) {
        // Pick among the clear-left / clear-right / clear variants.
        bool match = false;
        ArchOpcode opcode;
        int mask;
        if (me == 0) {
          // Mask reaches down to bit 0: clear the high bits above mb.
          match = true;
          opcode = kPPC_RotLeftAndClearLeft64;
          mask = mb;
        } else if (mb == 63) {
          // Mask reaches up to bit 63: clear the low bits below me.
          match = true;
          opcode = kPPC_RotLeftAndClearRight64;
          mask = me;
        } else if (sh && me <= sh) {
          // Interior mask, representable since the shift vacated the low bits.
          match = true;
          opcode = kPPC_RotLeftAndClear64;
          mask = mb;
        }
        if (match) {
          Emit(opcode, g.DefineAsRegister(node),
               g.UseRegister(mleft.left().node()), g.TempImmediate(sh),
               g.TempImmediate(mask));
          return;
        }
      }
    }
  }
  // Generic shift; amount may be a register or a 6-bit immediate.
  VisitRRO(this, node, kPPC_ShiftLeft64, kShift64Imm);
}
#endif
| 629 |
| 630 |
// 32-bit logical right shift; absorbs And(x, <contiguous mask>) >> sh into
// a single rotate-left-and-mask when the shifted mask stays contiguous.
void InstructionSelector::VisitWord32Shr(Node* node) {
  PPCOperandGenerator g(this);
  Int32BinopMatcher m(node);
  if (m.left().IsWord32And() && m.right().IsInRange(0, 31)) {
    // Try to absorb logical-and into rlwinm
    Int32BinopMatcher mleft(m.left().node());
    int sh = m.right().Value();
    int mb;
    int me;
    if (mleft.right().HasValue() &&
        IsContiguousMask32((uint32_t)(mleft.right().Value()) >> sh, &mb, &me)) {
      // Adjust the mask such that it doesn't include any rotated bits.
      if (mb > 31 - sh) mb = 31 - sh;
      // A logical right shift by sh is a rotate left by (32 - sh).
      sh = (32 - sh) & 0x1f;
      if (mb >= me) {
        Emit(kPPC_RotLeftAndMask32, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()), g.TempImmediate(sh),
             g.TempImmediate(mb), g.TempImmediate(me));
        return;
      }
    }
  }
  VisitRRO(this, node, kPPC_ShiftRight32, kShift32Imm);
}
| 655 |
| 656 |
#if V8_TARGET_ARCH_PPC64
// 64-bit logical right shift; absorbs And(x, <contiguous mask>) >> sh into
// a rotate-and-clear form when the shifted mask stays contiguous.
void InstructionSelector::VisitWord64Shr(Node* node) {
  PPCOperandGenerator g(this);
  Int64BinopMatcher m(node);
  if (m.left().IsWord64And() && m.right().IsInRange(0, 63)) {
    // Try to absorb logical-and into rldic
    Int64BinopMatcher mleft(m.left().node());
    int sh = m.right().Value();
    int mb;
    int me;
    if (mleft.right().HasValue() &&
        IsContiguousMask64((uint64_t)(mleft.right().Value()) >> sh, &mb, &me)) {
      // Adjust the mask such that it doesn't include any rotated bits.
      if (mb > 63 - sh) mb = 63 - sh;
      // A logical right shift by sh is a rotate left by (64 - sh).
      sh = (64 - sh) & 0x3f;
      if (mb >= me) {
        // Only the clear-left / clear-right forms apply here (no interior
        // mask can result from a right shift of a contiguous mask).
        bool match = false;
        ArchOpcode opcode;
        int mask;
        if (me == 0) {
          match = true;
          opcode = kPPC_RotLeftAndClearLeft64;
          mask = mb;
        } else if (mb == 63) {
          match = true;
          opcode = kPPC_RotLeftAndClearRight64;
          mask = me;
        }
        if (match) {
          Emit(opcode, g.DefineAsRegister(node),
               g.UseRegister(mleft.left().node()), g.TempImmediate(sh),
               g.TempImmediate(mask));
          return;
        }
      }
    }
  }
  VisitRRO(this, node, kPPC_ShiftRight64, kShift64Imm);
}
#endif
| 697 |
| 698 |
// 32-bit arithmetic right shift.
void InstructionSelector::VisitWord32Sar(Node* node) {
  PPCOperandGenerator g(this);
  Int32BinopMatcher m(node);
  // Replace with sign extension for (x << K) >> K where K is 16 or 24.
  if (CanCover(node, m.left().node()) && m.left().IsWord32Shl()) {
    Int32BinopMatcher mleft(m.left().node());
    if (mleft.right().Is(16) && m.right().Is(16)) {
      // (x << 16) >> 16: sign-extend the low halfword.
      Emit(kPPC_ExtendSignWord16, g.DefineAsRegister(node),
           g.UseRegister(mleft.left().node()));
      return;
    } else if (mleft.right().Is(24) && m.right().Is(24)) {
      // (x << 24) >> 24: sign-extend the low byte.
      Emit(kPPC_ExtendSignWord8, g.DefineAsRegister(node),
           g.UseRegister(mleft.left().node()));
      return;
    }
  }
  VisitRRO(this, node, kPPC_ShiftRightAlg32, kShift32Imm);
}
| 717 |
| 718 |
#if V8_TARGET_ARCH_PPC64
// 64-bit arithmetic right shift; amount may be a register or 6-bit immediate.
void InstructionSelector::VisitWord64Sar(Node* node) {
  VisitRRO(this, node, kPPC_ShiftRightAlg64, kShift64Imm);
}
#endif
| 724 |
| 725 |
// TODO(mbrandy): Absorb logical-and into rlwinm?
// 32-bit rotate right; amount may be a register or a 5-bit immediate.
void InstructionSelector::VisitWord32Ror(Node* node) {
  VisitRRO(this, node, kPPC_RotRight32, kShift32Imm);
}
| 730 |
| 731 |
#if V8_TARGET_ARCH_PPC64
// TODO(mbrandy): Absorb logical-and into rldic?
// 64-bit rotate right; amount may be a register or a 6-bit immediate.
void InstructionSelector::VisitWord64Ror(Node* node) {
  VisitRRO(this, node, kPPC_RotRight64, kShift64Imm);
}
#endif
| 738 |
| 739 |
// 32-bit add; right operand may be a signed 16-bit immediate.
void InstructionSelector::VisitInt32Add(Node* node) {
  VisitBinop<Int32BinopMatcher>(this, node, kPPC_Add32, kInt16Imm);
}
| 743 |
| 744 |
#if V8_TARGET_ARCH_PPC64
// 64-bit add; right operand may be a signed 16-bit immediate.
void InstructionSelector::VisitInt64Add(Node* node) {
  VisitBinop<Int64BinopMatcher>(this, node, kPPC_Add64, kInt16Imm);
}
#endif
| 750 |
| 751 |
| 752 void InstructionSelector::VisitInt32Sub(Node* node) { |
| 753 PPCOperandGenerator g(this); |
| 754 Int32BinopMatcher m(node); |
| 755 if (m.left().Is(0)) { |
| 756 Emit(kPPC_Neg32, g.DefineAsRegister(node), g.UseRegister(m.right().node())); |
| 757 } else { |
| 758 VisitBinop<Int32BinopMatcher>(this, node, kPPC_Sub32, kInt16Imm_Negate); |
| 759 } |
| 760 } |
| 761 |
| 762 |
#if V8_TARGET_ARCH_PPC64
// 64-bit subtract; 0 - x is strength-reduced to a negate instruction, and
// subtraction of an immediate is handled as addition of its negation.
void InstructionSelector::VisitInt64Sub(Node* node) {
  PPCOperandGenerator g(this);
  Int64BinopMatcher m(node);
  if (!m.left().Is(0)) {
    VisitBinop<Int64BinopMatcher>(this, node, kPPC_Sub64, kInt16Imm_Negate);
    return;
  }
  Emit(kPPC_Neg64, g.DefineAsRegister(node), g.UseRegister(m.right().node()));
}
#endif
| 774 |
| 775 |
// 32-bit multiply; reg-reg-reg form only (no immediate operand used here).
void InstructionSelector::VisitInt32Mul(Node* node) {
  VisitRRR(this, node, kPPC_Mul32);
}
| 779 |
| 780 |
#if V8_TARGET_ARCH_PPC64
// 64-bit multiply; reg-reg-reg form only.
void InstructionSelector::VisitInt64Mul(Node* node) {
  VisitRRR(this, node, kPPC_Mul64);
}
#endif
| 786 |
| 787 |
| 788 void InstructionSelector::VisitInt32MulHigh(Node* node) { |
| 789 PPCOperandGenerator g(this); |
| 790 Emit(kPPC_MulHigh32, g.DefineAsRegister(node), |
| 791 g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1))); |
| 792 } |
| 793 |
| 794 |
| 795 void InstructionSelector::VisitUint32MulHigh(Node* node) { |
| 796 PPCOperandGenerator g(this); |
| 797 Emit(kPPC_MulHighU32, g.DefineAsRegister(node), |
| 798 g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1))); |
| 799 } |
| 800 |
| 801 |
// Signed 32-bit divide; reg-reg-reg form only.
void InstructionSelector::VisitInt32Div(Node* node) {
  VisitRRR(this, node, kPPC_Div32);
}
| 805 |
| 806 |
#if V8_TARGET_ARCH_PPC64
// Signed 64-bit divide; reg-reg-reg form only.
void InstructionSelector::VisitInt64Div(Node* node) {
  VisitRRR(this, node, kPPC_Div64);
}
#endif
| 812 |
| 813 |
// Unsigned 32-bit divide; reg-reg-reg form only.
void InstructionSelector::VisitUint32Div(Node* node) {
  VisitRRR(this, node, kPPC_DivU32);
}
| 817 |
| 818 |
#if V8_TARGET_ARCH_PPC64
// Unsigned 64-bit divide; reg-reg-reg form only.
void InstructionSelector::VisitUint64Div(Node* node) {
  VisitRRR(this, node, kPPC_DivU64);
}
#endif
| 824 |
| 825 |
// Signed 32-bit modulus; reg-reg-reg form only.
void InstructionSelector::VisitInt32Mod(Node* node) {
  VisitRRR(this, node, kPPC_Mod32);
}
| 829 |
| 830 |
#if V8_TARGET_ARCH_PPC64
// Signed 64-bit modulus; reg-reg-reg form only.
void InstructionSelector::VisitInt64Mod(Node* node) {
  VisitRRR(this, node, kPPC_Mod64);
}
#endif
| 836 |
| 837 |
// Unsigned 32-bit modulus; reg-reg-reg form only.
void InstructionSelector::VisitUint32Mod(Node* node) {
  VisitRRR(this, node, kPPC_ModU32);
}
| 841 |
| 842 |
#if V8_TARGET_ARCH_PPC64
// Unsigned 64-bit modulus; reg-reg-reg form only.
void InstructionSelector::VisitUint64Mod(Node* node) {
  VisitRRR(this, node, kPPC_ModU64);
}
#endif
| 848 |
| 849 |
| 850 void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) { |
| 851 PPCOperandGenerator g(this); |
| 852 Emit(kPPC_Float32ToFloat64, g.DefineAsRegister(node), |
| 853 g.UseRegister(node->InputAt(0))); |
| 854 } |
| 855 |
| 856 |
// Signed int32 -> float64 conversion; register in, register out.
void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
  PPCOperandGenerator g(this);
  Emit(kPPC_Int32ToFloat64, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}
| 862 |
| 863 |
// Unsigned int32 -> float64 conversion; register in, register out.
void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
  PPCOperandGenerator g(this);
  Emit(kPPC_Uint32ToFloat64, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}
| 869 |
| 870 |
// float64 -> signed int32 conversion; register in, register out.
void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
  PPCOperandGenerator g(this);
  Emit(kPPC_Float64ToInt32, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}
| 876 |
| 877 |
// float64 -> unsigned int32 conversion; register in, register out.
void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
  PPCOperandGenerator g(this);
  Emit(kPPC_Float64ToUint32, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}
| 883 |
| 884 |
#if V8_TARGET_ARCH_PPC64
// int32 -> int64 widening via explicit sign extension.
void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
  // TODO(mbrandy): inspect input to see if nop is appropriate.
  PPCOperandGenerator g(this);
  Emit(kPPC_ExtendSignWord32, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}
| 892 |
| 893 |
// uint32 -> uint64 widening via explicit zero extension.
void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
  // TODO(mbrandy): inspect input to see if nop is appropriate.
  PPCOperandGenerator g(this);
  Emit(kPPC_Uint32ToUint64, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}
#endif
| 901 |
| 902 |
| 903 void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) { |
| 904 PPCOperandGenerator g(this); |
| 905 Emit(kPPC_Float64ToFloat32, g.DefineAsRegister(node), |
| 906 g.UseRegister(node->InputAt(0))); |
| 907 } |
| 908 |
| 909 |
#if V8_TARGET_ARCH_PPC64
// int64 -> int32 truncation; register in, register out.
void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
  PPCOperandGenerator g(this);
  // TODO(mbrandy): inspect input to see if nop is appropriate.
  Emit(kPPC_Int64ToInt32, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}
#endif
| 918 |
| 919 |
// float64 addition; reg-reg-reg form.
void InstructionSelector::VisitFloat64Add(Node* node) {
  // TODO(mbrandy): detect multiply-add
  VisitRRRFloat64(this, node, kPPC_AddFloat64);
}
| 924 |
| 925 |
// float64 subtraction; reg-reg-reg form.
void InstructionSelector::VisitFloat64Sub(Node* node) {
  // TODO(mbrandy): detect multiply-subtract
  VisitRRRFloat64(this, node, kPPC_SubFloat64);
}
| 930 |
| 931 |
// float64 multiplication; reg-reg-reg form.
void InstructionSelector::VisitFloat64Mul(Node* node) {
  // TODO(mbrandy): detect negate
  VisitRRRFloat64(this, node, kPPC_MulFloat64);
}
| 936 |
| 937 |
// float64 division; reg-reg-reg form.
void InstructionSelector::VisitFloat64Div(Node* node) {
  VisitRRRFloat64(this, node, kPPC_DivFloat64);
}
| 941 |
| 942 |
| 943 void InstructionSelector::VisitFloat64Mod(Node* node) { |
| 944 PPCOperandGenerator g(this); |
| 945 Emit(kPPC_ModFloat64, g.DefineAsFixed(node, d1), |
| 946 g.UseFixed(node->InputAt(0), d1), |
| 947 g.UseFixed(node->InputAt(1), d2))->MarkAsCall(); |
| 948 } |
| 949 |
| 950 |
| 951 void InstructionSelector::VisitFloat64Sqrt(Node* node) { |
| 952 VisitRRFloat64(this, kPPC_SqrtFloat64, node); |
| 953 } |
| 954 |
| 955 |
| 956 void InstructionSelector::VisitFloat64Floor(Node* node) { |
| 957 VisitRRFloat64(this, kPPC_FloorFloat64, node); |
| 958 } |
| 959 |
| 960 |
| 961 void InstructionSelector::VisitFloat64Ceil(Node* node) { |
| 962 VisitRRFloat64(this, kPPC_CeilFloat64, node); |
| 963 } |
| 964 |
| 965 |
| 966 void InstructionSelector::VisitFloat64RoundTruncate(Node* node) { |
| 967 VisitRRFloat64(this, kPPC_TruncateFloat64, node); |
| 968 } |
| 969 |
| 970 |
| 971 void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) { |
| 972 VisitRRFloat64(this, kPPC_RoundFloat64, node); |
| 973 } |
| 974 |
| 975 |
| 976 void InstructionSelector::VisitInt32AddWithOverflow(Node* node) { |
| 977 if (Node* ovf = NodeProperties::FindProjection(node, 1)) { |
| 978 FlagsContinuation cont(kOverflow, ovf); |
| 979 return VisitBinop<Int32BinopMatcher>(this, node, kPPC_AddWithOverflow32, |
| 980 kInt16Imm, &cont); |
| 981 } |
| 982 FlagsContinuation cont; |
| 983 VisitBinop<Int32BinopMatcher>(this, node, kPPC_AddWithOverflow32, kInt16Imm, |
| 984 &cont); |
| 985 } |
| 986 |
| 987 |
| 988 void InstructionSelector::VisitInt32SubWithOverflow(Node* node) { |
| 989 if (Node* ovf = NodeProperties::FindProjection(node, 1)) { |
| 990 FlagsContinuation cont(kOverflow, ovf); |
| 991 return VisitBinop<Int32BinopMatcher>(this, node, kPPC_SubWithOverflow32, |
| 992 kInt16Imm_Negate, &cont); |
| 993 } |
| 994 FlagsContinuation cont; |
| 995 VisitBinop<Int32BinopMatcher>(this, node, kPPC_SubWithOverflow32, |
| 996 kInt16Imm_Negate, &cont); |
| 997 } |
| 998 |
| 999 |
| 1000 static bool CompareLogical(FlagsContinuation* cont) { |
| 1001 switch (cont->condition()) { |
| 1002 case kUnsignedLessThan: |
| 1003 case kUnsignedGreaterThanOrEqual: |
| 1004 case kUnsignedLessThanOrEqual: |
| 1005 case kUnsignedGreaterThan: |
| 1006 return true; |
| 1007 default: |
| 1008 return false; |
| 1009 } |
| 1010 UNREACHABLE(); |
| 1011 return false; |
| 1012 } |
| 1013 |
| 1014 |
| 1015 // Shared routine for multiple compare operations. |
| 1016 static void VisitCompare(InstructionSelector* selector, InstructionCode opcode, |
| 1017 InstructionOperand left, InstructionOperand right, |
| 1018 FlagsContinuation* cont) { |
| 1019 PPCOperandGenerator g(selector); |
| 1020 opcode = cont->Encode(opcode); |
| 1021 if (cont->IsBranch()) { |
| 1022 selector->Emit(opcode, g.NoOutput(), left, right, |
| 1023 g.Label(cont->true_block()), |
| 1024 g.Label(cont->false_block()))->MarkAsControl(); |
| 1025 } else { |
| 1026 DCHECK(cont->IsSet()); |
| 1027 selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right); |
| 1028 } |
| 1029 } |
| 1030 |
| 1031 |
| 1032 // Shared routine for multiple word compare operations. |
| 1033 static void VisitWordCompare(InstructionSelector* selector, Node* node, |
| 1034 InstructionCode opcode, FlagsContinuation* cont, |
| 1035 bool commutative, ImmediateMode immediate_mode) { |
| 1036 PPCOperandGenerator g(selector); |
| 1037 Node* left = node->InputAt(0); |
| 1038 Node* right = node->InputAt(1); |
| 1039 |
| 1040 // Match immediates on left or right side of comparison. |
| 1041 if (g.CanBeImmediate(right, immediate_mode)) { |
| 1042 VisitCompare(selector, opcode, g.UseRegister(left), g.UseImmediate(right), |
| 1043 cont); |
| 1044 } else if (g.CanBeImmediate(left, immediate_mode)) { |
| 1045 if (!commutative) cont->Commute(); |
| 1046 VisitCompare(selector, opcode, g.UseRegister(right), g.UseImmediate(left), |
| 1047 cont); |
| 1048 } else { |
| 1049 VisitCompare(selector, opcode, g.UseRegister(left), g.UseRegister(right), |
| 1050 cont); |
| 1051 } |
| 1052 } |
| 1053 |
| 1054 |
| 1055 static void VisitWord32Compare(InstructionSelector* selector, Node* node, |
| 1056 FlagsContinuation* cont) { |
| 1057 ImmediateMode mode = (CompareLogical(cont) ? kInt16Imm_Unsigned : kInt16Imm); |
| 1058 VisitWordCompare(selector, node, kPPC_Cmp32, cont, false, mode); |
| 1059 } |
| 1060 |
| 1061 |
| 1062 #if V8_TARGET_ARCH_PPC64 |
| 1063 static void VisitWord64Compare(InstructionSelector* selector, Node* node, |
| 1064 FlagsContinuation* cont) { |
| 1065 ImmediateMode mode = (CompareLogical(cont) ? kInt16Imm_Unsigned : kInt16Imm); |
| 1066 VisitWordCompare(selector, node, kPPC_Cmp64, cont, false, mode); |
| 1067 } |
| 1068 #endif |
| 1069 |
| 1070 |
| 1071 // Shared routine for multiple float compare operations. |
| 1072 static void VisitFloat64Compare(InstructionSelector* selector, Node* node, |
| 1073 FlagsContinuation* cont) { |
| 1074 PPCOperandGenerator g(selector); |
| 1075 Node* left = node->InputAt(0); |
| 1076 Node* right = node->InputAt(1); |
| 1077 VisitCompare(selector, kPPC_CmpFloat64, g.UseRegister(left), |
| 1078 g.UseRegister(right), cont); |
| 1079 } |
| 1080 |
| 1081 |
// Shared routine for word comparisons against zero.  Walks down from |value|
// (whose use is |user|) and, whenever |value| can be covered (i.e. this is
// its only effect-compatible use), tries to fuse the comparison with the
// instruction that produced |value|.  Falls back to an explicit compare of
// |value| against 0 when no fusion applies.
static void VisitWordCompareZero(InstructionSelector* selector, Node* user,
                                 Node* value, InstructionCode opcode,
                                 FlagsContinuation* cont) {
  while (selector->CanCover(user, value)) {
    switch (value->opcode()) {
      case IrOpcode::kWord32Equal: {
        // Combine with comparisons against 0 by simply inverting the
        // continuation ((x == 0) == 0  <=>  x != 0) and descending into the
        // left operand.
        Int32BinopMatcher m(value);
        if (m.right().Is(0)) {
          user = value;
          value = m.left().node();
          cont->Negate();
          continue;
        }
        cont->OverwriteAndNegateIfEqual(kEqual);
        return VisitWord32Compare(selector, value, cont);
      }
      case IrOpcode::kInt32LessThan:
        cont->OverwriteAndNegateIfEqual(kSignedLessThan);
        return VisitWord32Compare(selector, value, cont);
      case IrOpcode::kInt32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
        return VisitWord32Compare(selector, value, cont);
      case IrOpcode::kUint32LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitWord32Compare(selector, value, cont);
      case IrOpcode::kUint32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitWord32Compare(selector, value, cont);
#if V8_TARGET_ARCH_PPC64
      case IrOpcode::kWord64Equal: {
        // Combine with comparisons against 0 by simply inverting the
        // continuation (same trick as the 32-bit case above).
        Int64BinopMatcher m(value);
        if (m.right().Is(0)) {
          user = value;
          value = m.left().node();
          cont->Negate();
          continue;
        }
        cont->OverwriteAndNegateIfEqual(kEqual);
        return VisitWord64Compare(selector, value, cont);
      }
      case IrOpcode::kInt64LessThan:
        cont->OverwriteAndNegateIfEqual(kSignedLessThan);
        return VisitWord64Compare(selector, value, cont);
      case IrOpcode::kInt64LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
        return VisitWord64Compare(selector, value, cont);
      case IrOpcode::kUint64LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitWord64Compare(selector, value, cont);
#endif
      case IrOpcode::kFloat64Equal:
        cont->OverwriteAndNegateIfEqual(kEqual);
        return VisitFloat64Compare(selector, value, cont);
      case IrOpcode::kFloat64LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitFloat64Compare(selector, value, cont);
      case IrOpcode::kFloat64LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitFloat64Compare(selector, value, cont);
      case IrOpcode::kProjection:
        // Check if this is the overflow output projection of an
        // <Operation>WithOverflow node.
        if (ProjectionIndexOf(value->op()) == 1u) {
          // We cannot combine the <Operation>WithOverflow with this branch
          // unless the 0th projection (the use of the actual value of the
          // <Operation>) is either NULL, which means there's no use of the
          // actual value, or was already defined, which means it is scheduled
          // *AFTER* this branch.
          Node* const node = value->InputAt(0);
          Node* const result = NodeProperties::FindProjection(node, 0);
          if (result == NULL || selector->IsDefined(result)) {
            switch (node->opcode()) {
              case IrOpcode::kInt32AddWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop<Int32BinopMatcher>(
                    selector, node, kPPC_AddWithOverflow32, kInt16Imm, cont);
              case IrOpcode::kInt32SubWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop<Int32BinopMatcher>(selector, node,
                                                     kPPC_SubWithOverflow32,
                                                     kInt16Imm_Negate, cont);
              default:
                break;
            }
          }
        }
        break;
      case IrOpcode::kInt32Sub:
        // A subtraction feeding a comparison against zero can be lowered as
        // a direct compare of its operands.
        return VisitWord32Compare(selector, value, cont);
      case IrOpcode::kWord32And:
        // TODO(mbrandy): opportunity for rlwinm?
        return VisitWordCompare(selector, value, kPPC_Tst32, cont, true,
                                kInt16Imm_Unsigned);
      // TODO(mbrandy): Handle?
      // case IrOpcode::kInt32Add:
      // case IrOpcode::kWord32Or:
      // case IrOpcode::kWord32Xor:
      // case IrOpcode::kWord32Sar:
      // case IrOpcode::kWord32Shl:
      // case IrOpcode::kWord32Shr:
      // case IrOpcode::kWord32Ror:
#if V8_TARGET_ARCH_PPC64
      case IrOpcode::kInt64Sub:
        return VisitWord64Compare(selector, value, cont);
      case IrOpcode::kWord64And:
        // TODO(mbrandy): opportunity for rldic?
        return VisitWordCompare(selector, value, kPPC_Tst64, cont, true,
                                kInt16Imm_Unsigned);
      // TODO(mbrandy): Handle?
      // case IrOpcode::kInt64Add:
      // case IrOpcode::kWord64Or:
      // case IrOpcode::kWord64Xor:
      // case IrOpcode::kWord64Sar:
      // case IrOpcode::kWord64Shl:
      // case IrOpcode::kWord64Shr:
      // case IrOpcode::kWord64Ror:
#endif
      default:
        break;
    }
    break;
  }

  // Branch could not be combined with a compare, emit compare against 0.
  PPCOperandGenerator g(selector);
  VisitCompare(selector, opcode, g.UseRegister(value), g.TempImmediate(0),
               cont);
}
| 1215 |
| 1216 |
| 1217 static void VisitWord32CompareZero(InstructionSelector* selector, Node* user, |
| 1218 Node* value, FlagsContinuation* cont) { |
| 1219 VisitWordCompareZero(selector, user, value, kPPC_Cmp32, cont); |
| 1220 } |
| 1221 |
| 1222 |
| 1223 #if V8_TARGET_ARCH_PPC64 |
| 1224 static void VisitWord64CompareZero(InstructionSelector* selector, Node* user, |
| 1225 Node* value, FlagsContinuation* cont) { |
| 1226 VisitWordCompareZero(selector, user, value, kPPC_Cmp64, cont); |
| 1227 } |
| 1228 #endif |
| 1229 |
| 1230 |
| 1231 void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch, |
| 1232 BasicBlock* fbranch) { |
| 1233 FlagsContinuation cont(kNotEqual, tbranch, fbranch); |
| 1234 VisitWord32CompareZero(this, branch, branch->InputAt(0), &cont); |
| 1235 } |
| 1236 |
| 1237 |
| 1238 void InstructionSelector::VisitWord32Equal(Node* const node) { |
| 1239 FlagsContinuation cont(kEqual, node); |
| 1240 Int32BinopMatcher m(node); |
| 1241 if (m.right().Is(0)) { |
| 1242 return VisitWord32CompareZero(this, m.node(), m.left().node(), &cont); |
| 1243 } |
| 1244 VisitWord32Compare(this, node, &cont); |
| 1245 } |
| 1246 |
| 1247 |
| 1248 void InstructionSelector::VisitInt32LessThan(Node* node) { |
| 1249 FlagsContinuation cont(kSignedLessThan, node); |
| 1250 VisitWord32Compare(this, node, &cont); |
| 1251 } |
| 1252 |
| 1253 |
| 1254 void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) { |
| 1255 FlagsContinuation cont(kSignedLessThanOrEqual, node); |
| 1256 VisitWord32Compare(this, node, &cont); |
| 1257 } |
| 1258 |
| 1259 |
| 1260 void InstructionSelector::VisitUint32LessThan(Node* node) { |
| 1261 FlagsContinuation cont(kUnsignedLessThan, node); |
| 1262 VisitWord32Compare(this, node, &cont); |
| 1263 } |
| 1264 |
| 1265 |
| 1266 void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) { |
| 1267 FlagsContinuation cont(kUnsignedLessThanOrEqual, node); |
| 1268 VisitWord32Compare(this, node, &cont); |
| 1269 } |
| 1270 |
| 1271 |
| 1272 #if V8_TARGET_ARCH_PPC64 |
| 1273 void InstructionSelector::VisitWord64Equal(Node* const node) { |
| 1274 FlagsContinuation cont(kEqual, node); |
| 1275 Int64BinopMatcher m(node); |
| 1276 if (m.right().Is(0)) { |
| 1277 return VisitWord64CompareZero(this, m.node(), m.left().node(), &cont); |
| 1278 } |
| 1279 VisitWord64Compare(this, node, &cont); |
| 1280 } |
| 1281 |
| 1282 |
| 1283 void InstructionSelector::VisitInt64LessThan(Node* node) { |
| 1284 FlagsContinuation cont(kSignedLessThan, node); |
| 1285 VisitWord64Compare(this, node, &cont); |
| 1286 } |
| 1287 |
| 1288 |
| 1289 void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) { |
| 1290 FlagsContinuation cont(kSignedLessThanOrEqual, node); |
| 1291 VisitWord64Compare(this, node, &cont); |
| 1292 } |
| 1293 |
| 1294 |
| 1295 void InstructionSelector::VisitUint64LessThan(Node* node) { |
| 1296 FlagsContinuation cont(kUnsignedLessThan, node); |
| 1297 VisitWord64Compare(this, node, &cont); |
| 1298 } |
| 1299 #endif |
| 1300 |
| 1301 |
| 1302 void InstructionSelector::VisitFloat64Equal(Node* node) { |
| 1303 FlagsContinuation cont(kEqual, node); |
| 1304 VisitFloat64Compare(this, node, &cont); |
| 1305 } |
| 1306 |
| 1307 |
| 1308 void InstructionSelector::VisitFloat64LessThan(Node* node) { |
| 1309 FlagsContinuation cont(kUnsignedLessThan, node); |
| 1310 VisitFloat64Compare(this, node, &cont); |
| 1311 } |
| 1312 |
| 1313 |
| 1314 void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) { |
| 1315 FlagsContinuation cont(kUnsignedLessThanOrEqual, node); |
| 1316 VisitFloat64Compare(this, node, &cont); |
| 1317 } |
| 1318 |
| 1319 |
// Lowers a call node: builds the call buffer, pushes stack arguments,
// selects the architecture-level call opcode from the descriptor kind, and
// emits the call instruction marked as a call.
void InstructionSelector::VisitCall(Node* node) {
  PPCOperandGenerator g(this);
  const CallDescriptor* descriptor = OpParameter<CallDescriptor*>(node);

  // A frame state input, if required, follows the regular call inputs.
  FrameStateDescriptor* frame_state_descriptor = NULL;
  if (descriptor->NeedsFrameState()) {
    frame_state_descriptor =
        GetFrameStateDescriptor(node->InputAt(descriptor->InputCount()));
  }

  CallBuffer buffer(zone(), descriptor, frame_state_descriptor);

  // Compute InstructionOperands for inputs and outputs.
  // TODO(turbofan): on PPC it's probably better to use the code object in a
  // register if there are multiple uses of it. Improve constant pool and the
  // heuristics in the register allocator for where to emit constants.
  InitializeCallBuffer(node, &buffer, true, false);

  // Push any stack arguments.  Iterated in reverse so the first pushed node
  // ends up deepest on the stack.
  // TODO(mbrandy): reverse order and use push only for first
  for (auto i = buffer.pushed_nodes.rbegin(); i != buffer.pushed_nodes.rend();
       i++) {
    Emit(kPPC_Push, g.NoOutput(), g.UseRegister(*i));
  }

  // Select the appropriate opcode based on the call type.
  InstructionCode opcode;
  switch (descriptor->kind()) {
    case CallDescriptor::kCallCodeObject: {
      opcode = kArchCallCodeObject;
      break;
    }
    case CallDescriptor::kCallJSFunction:
      opcode = kArchCallJSFunction;
      break;
    default:
      UNREACHABLE();
      return;
  }
  // Fold the call descriptor flags into the opcode's misc field.
  opcode |= MiscField::encode(descriptor->flags());

  // Emit the call instruction.  first_output is NULL for calls that produce
  // no values.
  InstructionOperand* first_output =
      buffer.outputs.size() > 0 ? &buffer.outputs.front() : NULL;
  Instruction* call_instr =
      Emit(opcode, buffer.outputs.size(), first_output,
           buffer.instruction_args.size(), &buffer.instruction_args.front());
  call_instr->MarkAsCall();
}
| 1369 |
| 1370 |
| 1371 // static |
| 1372 MachineOperatorBuilder::Flags |
| 1373 InstructionSelector::SupportedMachineOperatorFlags() { |
| 1374 return MachineOperatorBuilder::kFloat64Floor | |
| 1375 MachineOperatorBuilder::kFloat64Ceil | |
| 1376 MachineOperatorBuilder::kFloat64RoundTruncate | |
| 1377 MachineOperatorBuilder::kFloat64RoundTiesAway; |
| 1378 // We omit kWord32ShiftIsSafe as s[rl]w use 0x3f as a mask rather than 0x1f. |
| 1379 } |
| 1380 |
| 1381 } // namespace compiler |
| 1382 } // namespace internal |
| 1383 } // namespace v8 |
OLD | NEW |