OLD | NEW |
1 // Copyright 2014 the V8 project authors. All rights reserved. | 1 // Copyright 2014 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/base/adapters.h" | 5 #include "src/base/adapters.h" |
6 #include "src/base/bits.h" | 6 #include "src/base/bits.h" |
7 #include "src/compiler/instruction-selector-impl.h" | 7 #include "src/compiler/instruction-selector-impl.h" |
8 #include "src/compiler/node-matchers.h" | 8 #include "src/compiler/node-matchers.h" |
9 #include "src/compiler/node-properties.h" | 9 #include "src/compiler/node-properties.h" |
10 | 10 |
(...skipping 251 matching lines...)
262 | 262 |
263 | 263 |
264 void InstructionSelector::VisitWord32And(Node* node) { | 264 void InstructionSelector::VisitWord32And(Node* node) { |
265 Mips64OperandGenerator g(this); | 265 Mips64OperandGenerator g(this); |
266 Int32BinopMatcher m(node); | 266 Int32BinopMatcher m(node); |
267 if (m.left().IsWord32Shr() && CanCover(node, m.left().node()) && | 267 if (m.left().IsWord32Shr() && CanCover(node, m.left().node()) && |
268 m.right().HasValue()) { | 268 m.right().HasValue()) { |
269 uint32_t mask = m.right().Value(); | 269 uint32_t mask = m.right().Value(); |
270 uint32_t mask_width = base::bits::CountPopulation32(mask); | 270 uint32_t mask_width = base::bits::CountPopulation32(mask); |
271 uint32_t mask_msb = base::bits::CountLeadingZeros32(mask); | 271 uint32_t mask_msb = base::bits::CountLeadingZeros32(mask); |
272 if ((mask_width != 0) && (mask_msb + mask_width == 32)) { | 272 uint32_t mask_lsb = base::bits::CountTrailingZeros32(mask); |
273 // The mask must be contiguous, and occupy the least-significant bits. | 273 if ((mask_width != 0) && (mask_msb + mask_width + mask_lsb == 32)) { |
274 DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask)); | 274 // The mask must be contiguous. |
275 | 275 // Select Ext for And(Shr(x, imm), mask) where the mask may be in |
276 // Select Ext for And(Shr(x, imm), mask) where the mask is in the least | 276 // the least-significant bits or elsewhere. |
277 // significant bits. | |
278 Int32BinopMatcher mleft(m.left().node()); | 277 Int32BinopMatcher mleft(m.left().node()); |
279 if (mleft.right().HasValue()) { | 278 if (mleft.right().HasValue()) { |
280 // Any shift value can match; int32 shifts use `value % 32`. | 279 // Any shift value can match; int32 shifts use `value % 32`. |
281 uint32_t lsb = mleft.right().Value() & 0x1f; | 280 uint32_t lsb = mleft.right().Value() & 0x1f; |
282 | 281 lsb = lsb + mask_lsb; |
283 // Ext cannot extract bits past the register size, however since | 282 // Ext cannot extract bits past the register size, however since |
284 // shifting the original value would have introduced some zeros we can | 283 // shifting the original value would have introduced some zeros we can |
285 // still use Ext with a smaller mask and the remaining bits will be | 284 // still use Ext with a smaller mask and the remaining bits will be |
286 // zeros. | 285 // zeros. |
287 if (lsb + mask_width > 32) mask_width = 32 - lsb; | 286 if (lsb + mask_width > 32) mask_width = 32 - lsb; |
288 | 287 |
289 Emit(kMips64Ext, g.DefineAsRegister(node), | 288 Emit(kMips64Ext, g.DefineAsRegister(node), |
290 g.UseRegister(mleft.left().node()), g.TempImmediate(lsb), | 289 g.UseRegister(mleft.left().node()), g.TempImmediate(lsb), |
291 g.TempImmediate(mask_width)); | 290 g.TempImmediate(mask_width)); |
292 return; | 291 return; |
(...skipping 19 matching lines...)
312 | 311 |
313 | 312 |
314 void InstructionSelector::VisitWord64And(Node* node) { | 313 void InstructionSelector::VisitWord64And(Node* node) { |
315 Mips64OperandGenerator g(this); | 314 Mips64OperandGenerator g(this); |
316 Int64BinopMatcher m(node); | 315 Int64BinopMatcher m(node); |
317 if (m.left().IsWord64Shr() && CanCover(node, m.left().node()) && | 316 if (m.left().IsWord64Shr() && CanCover(node, m.left().node()) && |
318 m.right().HasValue()) { | 317 m.right().HasValue()) { |
319 uint64_t mask = m.right().Value(); | 318 uint64_t mask = m.right().Value(); |
320 uint32_t mask_width = base::bits::CountPopulation64(mask); | 319 uint32_t mask_width = base::bits::CountPopulation64(mask); |
321 uint32_t mask_msb = base::bits::CountLeadingZeros64(mask); | 320 uint32_t mask_msb = base::bits::CountLeadingZeros64(mask); |
322 if ((mask_width != 0) && (mask_msb + mask_width == 64)) { | 321 uint32_t mask_lsb = base::bits::CountTrailingZeros64(mask); |
323 // The mask must be contiguous, and occupy the least-significant bits. | 322 if ((mask_width != 0) && (mask_msb + mask_width + mask_lsb == 64)) { |
324 DCHECK_EQ(0u, base::bits::CountTrailingZeros64(mask)); | 323 // The mask must be contiguous. |
325 | 324 // Select Dext for And(Shr(x, imm), mask) where the mask may be in |
326 // Select Dext for And(Shr(x, imm), mask) where the mask is in the least | 325 // the least-significant bits or elsewhere. |
327 // significant bits. | |
328 Int64BinopMatcher mleft(m.left().node()); | 326 Int64BinopMatcher mleft(m.left().node()); |
329 if (mleft.right().HasValue()) { | 327 if (mleft.right().HasValue()) { |
330 // Any shift value can match; int64 shifts use `value % 64`. | 328 // Any shift value can match; int64 shifts use `value % 64`. |
331 uint32_t lsb = static_cast<uint32_t>(mleft.right().Value() & 0x3f); | 329 uint32_t lsb = static_cast<uint32_t>(mleft.right().Value() & 0x3f); |
332 | 330 lsb = lsb + mask_lsb; |
333 // Dext cannot extract bits past the register size, however since | 331 // Dext cannot extract bits past the register size, however since |
334 // shifting the original value would have introduced some zeros we can | 332 // shifting the original value would have introduced some zeros we can |
335 // still use Dext with a smaller mask and the remaining bits will be | 333 // still use Dext with a smaller mask and the remaining bits will be |
336 // zeros. | 334 // zeros. |
337 if (lsb + mask_width > 64) mask_width = 64 - lsb; | 335 if (lsb + mask_width > 64) mask_width = 64 - lsb; |
338 | |
339 Emit(kMips64Dext, g.DefineAsRegister(node), | 336 Emit(kMips64Dext, g.DefineAsRegister(node), |
340 g.UseRegister(mleft.left().node()), g.TempImmediate(lsb), | 337 g.UseRegister(mleft.left().node()), g.TempImmediate(lsb), |
341 g.TempImmediate(static_cast<int32_t>(mask_width))); | 338 g.TempImmediate(static_cast<int32_t>(mask_width))); |
342 return; | 339 return; |
343 } | 340 } |
344 // Other cases fall through to the normal And operation. | 341 // Other cases fall through to the normal And operation. |
345 } | 342 } |
346 } | 343 } |
347 if (m.right().HasValue()) { | 344 if (m.right().HasValue()) { |
348 uint64_t mask = m.right().Value(); | 345 uint64_t mask = m.right().Value(); |
(...skipping 1484 matching lines...)
1833 MachineOperatorBuilder::kFloat32RoundUp | | 1830 MachineOperatorBuilder::kFloat32RoundUp | |
1834 MachineOperatorBuilder::kFloat64RoundTruncate | | 1831 MachineOperatorBuilder::kFloat64RoundTruncate | |
1835 MachineOperatorBuilder::kFloat32RoundTruncate | | 1832 MachineOperatorBuilder::kFloat32RoundTruncate | |
1836 MachineOperatorBuilder::kFloat64RoundTiesEven | | 1833 MachineOperatorBuilder::kFloat64RoundTiesEven | |
1837 MachineOperatorBuilder::kFloat32RoundTiesEven; | 1834 MachineOperatorBuilder::kFloat32RoundTiesEven; |
1838 } | 1835 } |
1839 | 1836 |
1840 } // namespace compiler | 1837 } // namespace compiler |
1841 } // namespace internal | 1838 } // namespace internal |
1842 } // namespace v8 | 1839 } // namespace v8 |
OLD | NEW |