OLD | NEW |
1 // Copyright 2014 the V8 project authors. All rights reserved. | 1 // Copyright 2014 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/base/adapters.h" | 5 #include "src/base/adapters.h" |
6 #include "src/base/bits.h" | 6 #include "src/base/bits.h" |
7 #include "src/compiler/instruction-selector-impl.h" | 7 #include "src/compiler/instruction-selector-impl.h" |
8 #include "src/compiler/node-matchers.h" | 8 #include "src/compiler/node-matchers.h" |
9 #include "src/compiler/node-properties.h" | 9 #include "src/compiler/node-properties.h" |
10 | 10 |
(...skipping 245 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
256 g.UseRegister(index), g.UseRegister(base)); | 256 g.UseRegister(index), g.UseRegister(base)); |
257 // Emit desired store opcode, using temp addr_reg. | 257 // Emit desired store opcode, using temp addr_reg. |
258 Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(), | 258 Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(), |
259 addr_reg, g.TempImmediate(0), g.UseRegister(value)); | 259 addr_reg, g.TempImmediate(0), g.UseRegister(value)); |
260 } | 260 } |
261 } | 261 } |
262 } | 262 } |
263 | 263 |
264 | 264 |
265 void InstructionSelector::VisitWord32And(Node* node) { | 265 void InstructionSelector::VisitWord32And(Node* node) { |
| 266 Mips64OperandGenerator g(this); |
| 267 Int32BinopMatcher m(node); |
| 268 if (m.left().IsWord32Shr() && CanCover(node, m.left().node()) && |
| 269 m.right().HasValue()) { |
| 270 uint32_t mask = m.right().Value(); |
| 271 uint32_t mask_width = base::bits::CountPopulation32(mask); |
| 272 uint32_t mask_msb = base::bits::CountLeadingZeros32(mask); |
| 273 if ((mask_width != 0) && (mask_msb + mask_width == 32)) { |
| 274 // The mask must be contiguous, and occupy the least-significant bits. |
| 275 DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask)); |
| 276 |
| 277 // Select Ext for And(Shr(x, imm), mask) where the mask is in the least |
| 278 // significant bits. |
| 279 Int32BinopMatcher mleft(m.left().node()); |
| 280 if (mleft.right().HasValue()) { |
| 281 // Any shift value can match; int32 shifts use `value % 32`. |
| 282 uint32_t lsb = mleft.right().Value() & 0x1f; |
| 283 |
| 284 // Ext cannot extract bits past the register size, however since |
| 285 // shifting the original value would have introduced some zeros we can |
| 286 // still use Ext with a smaller mask and the remaining bits will be |
| 287 // zeros. |
| 288 if (lsb + mask_width > 32) mask_width = 32 - lsb; |
| 289 |
| 290 Emit(kMips64Ext, g.DefineAsRegister(node), |
| 291 g.UseRegister(mleft.left().node()), g.TempImmediate(lsb), |
| 292 g.TempImmediate(mask_width)); |
| 293 return; |
| 294 } |
| 295 // Other cases fall through to the normal And operation. |
| 296 } |
| 297 } |
266 VisitBinop(this, node, kMips64And); | 298 VisitBinop(this, node, kMips64And); |
267 } | 299 } |
268 | 300 |
269 | 301 |
270 void InstructionSelector::VisitWord64And(Node* node) { | 302 void InstructionSelector::VisitWord64And(Node* node) { |
| 303 Mips64OperandGenerator g(this); |
| 304 Int64BinopMatcher m(node); |
| 305 if (m.left().IsWord64Shr() && CanCover(node, m.left().node()) && |
| 306 m.right().HasValue()) { |
| 307 uint64_t mask = m.right().Value(); |
| 308 uint32_t mask_width = base::bits::CountPopulation64(mask); |
| 309 uint32_t mask_msb = base::bits::CountLeadingZeros64(mask); |
| 310 if ((mask_width != 0) && (mask_msb + mask_width == 64)) { |
| 311 // The mask must be contiguous, and occupy the least-significant bits. |
| 312 DCHECK_EQ(0u, base::bits::CountTrailingZeros64(mask)); |
| 313 |
| 314 // Select Dext for And(Shr(x, imm), mask) where the mask is in the least |
| 315 // significant bits. |
| 316 Int64BinopMatcher mleft(m.left().node()); |
| 317 if (mleft.right().HasValue()) { |
| 318 // Any shift value can match; int64 shifts use `value % 64`. |
| 319 uint32_t lsb = static_cast<uint32_t>(mleft.right().Value() & 0x3f); |
| 320 |
| 321 // Dext cannot extract bits past the register size, however since |
| 322 // shifting the original value would have introduced some zeros we can |
| 323 // still use Dext with a smaller mask and the remaining bits will be |
| 324 // zeros. |
| 325 if (lsb + mask_width > 64) mask_width = 64 - lsb; |
| 326 |
| 327 Emit(kMips64Dext, g.DefineAsRegister(node), |
| 328 g.UseRegister(mleft.left().node()), g.TempImmediate(lsb), |
| 329 g.TempImmediate(static_cast<int32_t>(mask_width))); |
| 330 return; |
| 331 } |
| 332 // Other cases fall through to the normal And operation. |
| 333 } |
| 334 } |
271 VisitBinop(this, node, kMips64And); | 335 VisitBinop(this, node, kMips64And); |
272 } | 336 } |
273 | 337 |
274 | 338 |
// 32-bit bitwise OR: no combining patterns apply, always a plain OR.
void InstructionSelector::VisitWord32Or(Node* node) {
  VisitBinop(this, node, kMips64Or);
}
278 | 342 |
279 | 343 |
280 void InstructionSelector::VisitWord64Or(Node* node) { | 344 void InstructionSelector::VisitWord64Or(Node* node) { |
(...skipping 10 matching lines...) Expand all Loading... |
291 VisitBinop(this, node, kMips64Xor); | 355 VisitBinop(this, node, kMips64Xor); |
292 } | 356 } |
293 | 357 |
294 | 358 |
// 32-bit left shift: always the plain register/operand shift.
void InstructionSelector::VisitWord32Shl(Node* node) {
  VisitRRO(this, kMips64Shl, node);
}
298 | 362 |
299 | 363 |
300 void InstructionSelector::VisitWord32Shr(Node* node) { | 364 void InstructionSelector::VisitWord32Shr(Node* node) { |
| 365 Int32BinopMatcher m(node); |
| 366 if (m.left().IsWord32And() && m.right().HasValue()) { |
| 367 uint32_t lsb = m.right().Value() & 0x1f; |
| 368 Int32BinopMatcher mleft(m.left().node()); |
| 369 if (mleft.right().HasValue()) { |
| 370 // Select Ext for Shr(And(x, mask), imm) where the result of the mask is |
| 371 // shifted into the least-significant bits. |
| 372 uint32_t mask = (mleft.right().Value() >> lsb) << lsb; |
| 373 unsigned mask_width = base::bits::CountPopulation32(mask); |
| 374 unsigned mask_msb = base::bits::CountLeadingZeros32(mask); |
| 375 if ((mask_msb + mask_width + lsb) == 32) { |
| 376 Mips64OperandGenerator g(this); |
| 377 DCHECK_EQ(lsb, base::bits::CountTrailingZeros32(mask)); |
| 378 Emit(kMips64Ext, g.DefineAsRegister(node), |
| 379 g.UseRegister(mleft.left().node()), g.TempImmediate(lsb), |
| 380 g.TempImmediate(mask_width)); |
| 381 return; |
| 382 } |
| 383 } |
| 384 } |
301 VisitRRO(this, kMips64Shr, node); | 385 VisitRRO(this, kMips64Shr, node); |
302 } | 386 } |
303 | 387 |
304 | 388 |
// 32-bit arithmetic right shift: always the plain register/operand shift.
void InstructionSelector::VisitWord32Sar(Node* node) {
  VisitRRO(this, kMips64Sar, node);
}
308 | 392 |
309 | 393 |
310 void InstructionSelector::VisitWord64Shl(Node* node) { | 394 void InstructionSelector::VisitWord64Shl(Node* node) { |
311 Mips64OperandGenerator g(this); | 395 Mips64OperandGenerator g(this); |
312 Int64BinopMatcher m(node); | 396 Int64BinopMatcher m(node); |
313 if ((m.left().IsChangeInt32ToInt64() || m.left().IsChangeUint32ToUint64()) && | 397 if ((m.left().IsChangeInt32ToInt64() || m.left().IsChangeUint32ToUint64()) && |
314 m.right().IsInRange(32, 63)) { | 398 m.right().IsInRange(32, 63)) { |
315 // There's no need to sign/zero-extend to 64-bit if we shift out the upper | 399 // There's no need to sign/zero-extend to 64-bit if we shift out the upper |
316 // 32 bits anyway. | 400 // 32 bits anyway. |
317 Emit(kMips64Dshl, g.DefineSameAsFirst(node), | 401 Emit(kMips64Dshl, g.DefineSameAsFirst(node), |
318 g.UseRegister(m.left().node()->InputAt(0)), | 402 g.UseRegister(m.left().node()->InputAt(0)), |
319 g.UseImmediate(m.right().node())); | 403 g.UseImmediate(m.right().node())); |
320 return; | 404 return; |
321 } | 405 } |
322 VisitRRO(this, kMips64Dshl, node); | 406 VisitRRO(this, kMips64Dshl, node); |
323 } | 407 } |
324 | 408 |
325 | 409 |
326 void InstructionSelector::VisitWord64Shr(Node* node) { | 410 void InstructionSelector::VisitWord64Shr(Node* node) { |
| 411 Int64BinopMatcher m(node); |
| 412 if (m.left().IsWord64And() && m.right().HasValue()) { |
| 413 uint32_t lsb = m.right().Value() & 0x3f; |
| 414 Int64BinopMatcher mleft(m.left().node()); |
| 415 if (mleft.right().HasValue()) { |
| 416 // Select Dext for Shr(And(x, mask), imm) where the result of the mask is |
| 417 // shifted into the least-significant bits. |
| 418 uint64_t mask = (mleft.right().Value() >> lsb) << lsb; |
| 419 unsigned mask_width = base::bits::CountPopulation64(mask); |
| 420 unsigned mask_msb = base::bits::CountLeadingZeros64(mask); |
| 421 if ((mask_msb + mask_width + lsb) == 64) { |
| 422 Mips64OperandGenerator g(this); |
| 423 DCHECK_EQ(lsb, base::bits::CountTrailingZeros64(mask)); |
| 424 Emit(kMips64Dext, g.DefineAsRegister(node), |
| 425 g.UseRegister(mleft.left().node()), g.TempImmediate(lsb), |
| 426 g.TempImmediate(mask_width)); |
| 427 return; |
| 428 } |
| 429 } |
| 430 } |
327 VisitRRO(this, kMips64Dshr, node); | 431 VisitRRO(this, kMips64Dshr, node); |
328 } | 432 } |
329 | 433 |
330 | 434 |
// 64-bit arithmetic right shift: always the plain register/operand shift.
void InstructionSelector::VisitWord64Sar(Node* node) {
  VisitRRO(this, kMips64Dsar, node);
}
334 | 438 |
335 | 439 |
336 void InstructionSelector::VisitWord32Ror(Node* node) { | 440 void InstructionSelector::VisitWord32Ror(Node* node) { |
(...skipping 1066 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1403 MachineOperatorBuilder::kFloat32Max | | 1507 MachineOperatorBuilder::kFloat32Max | |
1404 MachineOperatorBuilder::kFloat64RoundDown | | 1508 MachineOperatorBuilder::kFloat64RoundDown | |
1405 MachineOperatorBuilder::kFloat64RoundUp | | 1509 MachineOperatorBuilder::kFloat64RoundUp | |
1406 MachineOperatorBuilder::kFloat64RoundTruncate | | 1510 MachineOperatorBuilder::kFloat64RoundTruncate | |
1407 MachineOperatorBuilder::kFloat64RoundTiesEven; | 1511 MachineOperatorBuilder::kFloat64RoundTiesEven; |
1408 } | 1512 } |
1409 | 1513 |
1410 } // namespace compiler | 1514 } // namespace compiler |
1411 } // namespace internal | 1515 } // namespace internal |
1412 } // namespace v8 | 1516 } // namespace v8 |
OLD | NEW |