| OLD | NEW |
| 1 // Copyright 2014 the V8 project authors. All rights reserved. | 1 // Copyright 2014 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/base/adapters.h" | 5 #include "src/base/adapters.h" |
| 6 #include "src/base/bits.h" | 6 #include "src/base/bits.h" |
| 7 #include "src/compiler/instruction-selector-impl.h" | 7 #include "src/compiler/instruction-selector-impl.h" |
| 8 #include "src/compiler/node-matchers.h" | 8 #include "src/compiler/node-matchers.h" |
| 9 #include "src/compiler/node-properties.h" | 9 #include "src/compiler/node-properties.h" |
| 10 | 10 |
| (...skipping 277 matching lines...) |
| 288 if (lsb + mask_width > 32) mask_width = 32 - lsb; | 288 if (lsb + mask_width > 32) mask_width = 32 - lsb; |
| 289 | 289 |
| 290 Emit(kMips64Ext, g.DefineAsRegister(node), | 290 Emit(kMips64Ext, g.DefineAsRegister(node), |
| 291 g.UseRegister(mleft.left().node()), g.TempImmediate(lsb), | 291 g.UseRegister(mleft.left().node()), g.TempImmediate(lsb), |
| 292 g.TempImmediate(mask_width)); | 292 g.TempImmediate(mask_width)); |
| 293 return; | 293 return; |
| 294 } | 294 } |
| 295 // Other cases fall through to the normal And operation. | 295 // Other cases fall through to the normal And operation. |
| 296 } | 296 } |
| 297 } | 297 } |
| 298 if (m.right().HasValue()) { |
| 299 uint32_t mask = m.right().Value(); |
| 300 uint32_t shift = base::bits::CountPopulation32(~mask); |
| 301 uint32_t msb = base::bits::CountLeadingZeros32(~mask); |
| 302 if (shift != 0 && shift != 32 && msb + shift == 32) { |
| 303 // Insert zeros for (x >> K) << K => x & ~(2^K - 1) expression reduction |
| 304 // and remove constant loading of inverted mask. |
| 305 Emit(kMips64Ins, g.DefineSameAsFirst(node), |
| 306 g.UseRegister(m.left().node()), g.TempImmediate(0), |
| 307 g.TempImmediate(shift)); |
| 308 return; |
| 309 } |
| 310 } |
| 298 VisitBinop(this, node, kMips64And); | 311 VisitBinop(this, node, kMips64And); |
| 299 } | 312 } |
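
For readers following the new hunk above: the shape test reads cleanly in isolation. `CountPopulation32(~mask)` counts the K bits the mask clears, and `msb + shift == 32` confirms those K bits form one contiguous run at the bottom, i.e. `mask == ~(2^K - 1)`. That is exactly the mask produced by the `(x >> K) << K` pattern, so the AND can be lowered to a single `ins` writing K zero bits at position 0, with no materialization of the inverted-mask constant. A minimal standalone sketch (not V8 code; `Popcount32`/`Clz32` are hypothetical stand-ins for `base::bits::CountPopulation32`/`CountLeadingZeros32`, implemented here with GCC/Clang builtins):

```cpp
#include <cassert>
#include <cstdint>

// Hypothetical helpers standing in for the base::bits functions.
static uint32_t Popcount32(uint32_t v) {
  return static_cast<uint32_t>(__builtin_popcount(v));
}
static uint32_t Clz32(uint32_t v) {
  return v ? static_cast<uint32_t>(__builtin_clz(v)) : 32;  // clz(0) is UB
}

// Returns true and sets *k when mask has the shape ~(2^K - 1), 0 < K < 32.
bool IsLowBitsClearMask(uint32_t mask, uint32_t* k) {
  uint32_t shift = Popcount32(~mask);  // candidate K: number of cleared bits
  uint32_t msb = Clz32(~mask);         // zeros above the candidate run
  if (shift != 0 && shift != 32 && msb + shift == 32) {
    *k = shift;  // ~mask is exactly K contiguous low ones
    return true;
  }
  return false;
}

int main() {
  uint32_t k;
  assert(IsLowBitsClearMask(0xFFFFFFF0u, &k) && k == 4);  // (x >> 4) << 4
  assert(!IsLowBitsClearMask(0xFFFF0FF0u, &k));           // non-contiguous
  assert(!IsLowBitsClearMask(0u, &k));                    // shift == 32
}
```
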
| 300 | 313 |
| 301 | 314 |
| 302 void InstructionSelector::VisitWord64And(Node* node) { | 315 void InstructionSelector::VisitWord64And(Node* node) { |
| 303 Mips64OperandGenerator g(this); | 316 Mips64OperandGenerator g(this); |
| 304 Int64BinopMatcher m(node); | 317 Int64BinopMatcher m(node); |
| 305 if (m.left().IsWord64Shr() && CanCover(node, m.left().node()) && | 318 if (m.left().IsWord64Shr() && CanCover(node, m.left().node()) && |
| 306 m.right().HasValue()) { | 319 m.right().HasValue()) { |
| 307 uint64_t mask = m.right().Value(); | 320 uint64_t mask = m.right().Value(); |
| (...skipping 17 matching lines...) |
| 325 if (lsb + mask_width > 64) mask_width = 64 - lsb; | 338 if (lsb + mask_width > 64) mask_width = 64 - lsb; |
| 326 | 339 |
| 327 Emit(kMips64Dext, g.DefineAsRegister(node), | 340 Emit(kMips64Dext, g.DefineAsRegister(node), |
| 328 g.UseRegister(mleft.left().node()), g.TempImmediate(lsb), | 341 g.UseRegister(mleft.left().node()), g.TempImmediate(lsb), |
| 329 g.TempImmediate(static_cast<int32_t>(mask_width))); | 342 g.TempImmediate(static_cast<int32_t>(mask_width))); |
| 330 return; | 343 return; |
| 331 } | 344 } |
| 332 // Other cases fall through to the normal And operation. | 345 // Other cases fall through to the normal And operation. |
| 333 } | 346 } |
| 334 } | 347 } |
| 348 if (m.right().HasValue()) { |
| 349 uint64_t mask = m.right().Value(); |
| 350 uint32_t shift = base::bits::CountPopulation64(~mask); |
| 351 uint32_t msb = base::bits::CountLeadingZeros64(~mask); |
| 352 if (shift != 0 && shift < 32 && msb + shift == 64) { |
| 353 // Insert zeros for (x >> K) << K => x & ~(2^K - 1) expression reduction |
| 354 // and remove constant loading of inverted mask. Dins cannot insert bits |
| 355 // past the 32-bit word size, so only shifts smaller than 32 are handled. |
| 356 Emit(kMips64Dins, g.DefineSameAsFirst(node), |
| 357 g.UseRegister(m.left().node()), g.TempImmediate(0), |
| 358 g.TempImmediate(shift)); |
| 359 return; |
| 360 } |
| 361 } |
| 335 VisitBinop(this, node, kMips64And); | 362 VisitBinop(this, node, kMips64And); |
| 336 } | 363 } |
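
The 64-bit hunk applies the same shape test with one tightening: `shift != 32` becomes `shift < 32`, since, as the comment notes, `dins` cannot insert a field extending past the 32-bit word size, so masks clearing 32 or more low bits fall through to the plain And. A standalone sketch under the same assumptions (hypothetical `Popcount64`/`Clz64` stand-ins for the `base::bits` helpers):

```cpp
#include <cassert>
#include <cstdint>

// Hypothetical stand-ins for base::bits::CountPopulation64 / CountLeadingZeros64.
static uint32_t Popcount64(uint64_t v) {
  return static_cast<uint32_t>(__builtin_popcountll(v));
}
static uint32_t Clz64(uint64_t v) {
  return v ? static_cast<uint32_t>(__builtin_clzll(v)) : 64;  // clz(0) is UB
}

// Returns true and sets *k when mask has the shape ~(2^K - 1), 0 < K < 32;
// larger K would need a wider insert than dins can encode.
bool IsLowBitsClearMask64(uint64_t mask, uint32_t* k) {
  uint32_t shift = Popcount64(~mask);
  uint32_t msb = Clz64(~mask);
  if (shift != 0 && shift < 32 && msb + shift == 64) {
    *k = shift;
    return true;
  }
  return false;
}

int main() {
  uint32_t k;
  assert(IsLowBitsClearMask64(~UINT64_C(0xFF), &k) && k == 8);
  // K = 40 has the right shape but exceeds what dins can encode:
  assert(!IsLowBitsClearMask64(~((UINT64_C(1) << 40) - 1), &k));
}
```
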
| 337 | 364 |
| 338 | 365 |
| 339 void InstructionSelector::VisitWord32Or(Node* node) { | 366 void InstructionSelector::VisitWord32Or(Node* node) { |
| 340 VisitBinop(this, node, kMips64Or); | 367 VisitBinop(this, node, kMips64Or); |
| 341 } | 368 } |
| 342 | 369 |
| 343 | 370 |
| 344 void InstructionSelector::VisitWord64Or(Node* node) { | 371 void InstructionSelector::VisitWord64Or(Node* node) { |
| (...skipping 1276 matching lines...) |
| 1621 MachineOperatorBuilder::kFloat32RoundUp | | 1648 MachineOperatorBuilder::kFloat32RoundUp | |
| 1622 MachineOperatorBuilder::kFloat64RoundTruncate | | 1649 MachineOperatorBuilder::kFloat64RoundTruncate | |
| 1623 MachineOperatorBuilder::kFloat32RoundTruncate | | 1650 MachineOperatorBuilder::kFloat32RoundTruncate | |
| 1624 MachineOperatorBuilder::kFloat64RoundTiesEven | | 1651 MachineOperatorBuilder::kFloat64RoundTiesEven | |
| 1625 MachineOperatorBuilder::kFloat32RoundTiesEven; | 1652 MachineOperatorBuilder::kFloat32RoundTiesEven; |
| 1626 } | 1653 } |
| 1627 | 1654 |
| 1628 } // namespace compiler | 1655 } // namespace compiler |
| 1629 } // namespace internal | 1656 } // namespace internal |
| 1630 } // namespace v8 | 1657 } // namespace v8 |