| OLD | NEW |
| 1 // Copyright 2014 the V8 project authors. All rights reserved. | 1 // Copyright 2014 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/compiler/instruction-selector-impl.h" | 5 #include "src/compiler/instruction-selector-impl.h" |
| 6 #include "src/compiler/node-matchers.h" | 6 #include "src/compiler/node-matchers.h" |
| 7 | 7 |
| 8 namespace v8 { | 8 namespace v8 { |
| 9 namespace internal { | 9 namespace internal { |
| 10 namespace compiler { | 10 namespace compiler { |
| (...skipping 420 matching lines...) |
| 431 uint32_t mask_width = base::bits::CountPopulation32(mask); | 431 uint32_t mask_width = base::bits::CountPopulation32(mask); |
| 432 uint32_t mask_msb = base::bits::CountLeadingZeros32(mask); | 432 uint32_t mask_msb = base::bits::CountLeadingZeros32(mask); |
| 433 if ((mask_width != 0) && (mask_msb + mask_width == 32)) { | 433 if ((mask_width != 0) && (mask_msb + mask_width == 32)) { |
| 434 // The mask must be contiguous, and occupy the least-significant bits. | 434 // The mask must be contiguous, and occupy the least-significant bits. |
| 435 DCHECK_EQ(0, base::bits::CountTrailingZeros32(mask)); | 435 DCHECK_EQ(0, base::bits::CountTrailingZeros32(mask)); |
| 436 | 436 |
| 437 // Select Ubfx for And(Shr(x, imm), mask) where the mask is in the least | 437 // Select Ubfx for And(Shr(x, imm), mask) where the mask is in the least |
| 438 // significant bits. | 438 // significant bits. |
| 439 Int32BinopMatcher mleft(m.left().node()); | 439 Int32BinopMatcher mleft(m.left().node()); |
| 440 if (mleft.right().IsInRange(0, 31)) { | 440 if (mleft.right().IsInRange(0, 31)) { |
| 441 // Ubfx cannot extract bits past the register size; however, since |
| 442 // shifting the original value would have introduced some zeros, we can |
| 443 // still use ubfx with a smaller mask and the remaining bits will be |
| 444 // zeros. |
| 445 uint32_t lsb = mleft.right().Value(); |
| 446 if (lsb + mask_width > 32) mask_width = 32 - lsb; |
| 447 |
| 441 Emit(kArm64Ubfx32, g.DefineAsRegister(node), | 448 Emit(kArm64Ubfx32, g.DefineAsRegister(node), |
| 442 g.UseRegister(mleft.left().node()), | 449 g.UseRegister(mleft.left().node()), |
| 443 g.UseImmediate(mleft.right().node()), g.TempImmediate(mask_width)); | 450 g.UseImmediate(mleft.right().node()), g.TempImmediate(mask_width)); |
| 444 return; | 451 return; |
| 445 } | 452 } |
| 446 // Other cases fall through to the normal And operation. | 453 // Other cases fall through to the normal And operation. |
| 447 } | 454 } |
| 448 } | 455 } |
| 449 VisitLogical<Int32BinopMatcher>( | 456 VisitLogical<Int32BinopMatcher>( |
| 450 this, node, &m, kArm64And32, CanCover(node, m.left().node()), | 457 this, node, &m, kArm64And32, CanCover(node, m.left().node()), |
| (...skipping 10 matching lines...) |
| 461 uint64_t mask_width = base::bits::CountPopulation64(mask); | 468 uint64_t mask_width = base::bits::CountPopulation64(mask); |
| 462 uint64_t mask_msb = base::bits::CountLeadingZeros64(mask); | 469 uint64_t mask_msb = base::bits::CountLeadingZeros64(mask); |
| 463 if ((mask_width != 0) && (mask_msb + mask_width == 64)) { | 470 if ((mask_width != 0) && (mask_msb + mask_width == 64)) { |
| 464 // The mask must be contiguous, and occupy the least-significant bits. | 471 // The mask must be contiguous, and occupy the least-significant bits. |
| 465 DCHECK_EQ(0, base::bits::CountTrailingZeros64(mask)); | 472 DCHECK_EQ(0, base::bits::CountTrailingZeros64(mask)); |
| 466 | 473 |
| 467 // Select Ubfx for And(Shr(x, imm), mask) where the mask is in the least | 474 // Select Ubfx for And(Shr(x, imm), mask) where the mask is in the least |
| 468 // significant bits. | 475 // significant bits. |
| 469 Int64BinopMatcher mleft(m.left().node()); | 476 Int64BinopMatcher mleft(m.left().node()); |
| 470 if (mleft.right().IsInRange(0, 63)) { | 477 if (mleft.right().IsInRange(0, 63)) { |
| 478 // Ubfx cannot extract bits past the register size; however, since |
| 479 // shifting the original value would have introduced some zeros, we can |
| 480 // still use ubfx with a smaller mask and the remaining bits will be |
| 481 // zeros. |
| 482 uint64_t lsb = mleft.right().Value(); |
| 483 if (lsb + mask_width > 64) mask_width = 64 - lsb; |
| 484 |
| 471 Emit(kArm64Ubfx, g.DefineAsRegister(node), | 485 Emit(kArm64Ubfx, g.DefineAsRegister(node), |
| 472 g.UseRegister(mleft.left().node()), | 486 g.UseRegister(mleft.left().node()), |
| 473 g.UseImmediate(mleft.right().node()), g.TempImmediate(mask_width)); | 487 g.UseImmediate(mleft.right().node()), g.TempImmediate(mask_width)); |
| 474 return; | 488 return; |
| 475 } | 489 } |
| 476 // Other cases fall through to the normal And operation. | 490 // Other cases fall through to the normal And operation. |
| 477 } | 491 } |
| 478 } | 492 } |
| 479 VisitLogical<Int64BinopMatcher>( | 493 VisitLogical<Int64BinopMatcher>( |
| 480 this, node, &m, kArm64And, CanCover(node, m.left().node()), | 494 this, node, &m, kArm64And, CanCover(node, m.left().node()), |
| (...skipping 821 matching lines...) |
| 1302 | 1316 |
| 1303 // static | 1317 // static |
| 1304 MachineOperatorBuilder::Flags | 1318 MachineOperatorBuilder::Flags |
| 1305 InstructionSelector::SupportedMachineOperatorFlags() { | 1319 InstructionSelector::SupportedMachineOperatorFlags() { |
| 1306 return MachineOperatorBuilder::kNoFlags; | 1320 return MachineOperatorBuilder::kNoFlags; |
| 1307 } | 1321 } |
| 1308 | 1322 |
| 1309 } // namespace compiler | 1323 } // namespace compiler |
| 1310 } // namespace internal | 1324 } // namespace internal |
| 1311 } // namespace v8 | 1325 } // namespace v8 |
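
For reference, a minimal standalone sketch of the selection rule both hunks above implement (the 64-bit path is identical with 64 substituted for 32). The helper name TryMatchUbfx32 and the Ubfx32Params struct are hypothetical illustrations, and the GCC/Clang builtins stand in for base::bits::CountPopulation32 and base::bits::CountLeadingZeros32; this is a sketch of the logic, not the V8 code itself.

#include <cassert>
#include <cstdint>

// Hypothetical parameter block for a matched Ubfx32 selection.
struct Ubfx32Params {
  bool match;
  uint32_t lsb;    // lowest bit of the extracted field
  uint32_t width;  // number of bits to extract
};

// Decide whether And(Shr(x, lsb), mask) can be selected as Ubfx32.
static Ubfx32Params TryMatchUbfx32(uint32_t lsb, uint32_t mask) {
  uint32_t mask_width = static_cast<uint32_t>(__builtin_popcount(mask));
  if (mask_width == 0) return {false, 0, 0};
  uint32_t mask_msb = static_cast<uint32_t>(__builtin_clz(mask));  // mask != 0 here
  // Contiguous mask in the least-significant bits: the popcount plus the
  // leading zeros must cover all 32 bits, e.g. 0x00ff gives 8 + 24 == 32.
  if (mask_msb + mask_width != 32) return {false, 0, 0};
  assert(lsb <= 31);  // the shift amount was checked with IsInRange(0, 31)
  // Ubfx cannot read past bit 31 of the source register, but bits at or
  // above (32 - lsb) of the shifted value are already zero, so clamping
  // the width leaves the result unchanged.
  if (lsb + mask_width > 32) mask_width = 32 - lsb;
  return {true, lsb, mask_width};
}

int main() {
  // And(Shr(x, 28), 0xff): lsb + mask_width = 28 + 8 > 32, so the width
  // is clamped to 4 and the match becomes: ubfx w0, w1, #28, #4.
  Ubfx32Params p = TryMatchUbfx32(28, 0xff);
  assert(p.match && p.lsb == 28 && p.width == 4);
  return 0;
}

Clamping the width rather than rejecting the match keeps the single-instruction Ubfx selection applicable to patterns like And(Shr(x, 28), 0xff), which would otherwise fall through to a two-instruction shift-and-mask sequence.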