| OLD | NEW |
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 356 matching lines...) |
| 367 static void LoadSSE2Smis(MacroAssembler* masm, Register scratch); | 367 static void LoadSSE2Smis(MacroAssembler* masm, Register scratch); |
| 368 | 368 |
| 369 // Checks that the two floating point numbers loaded into xmm0 and xmm1 | 369 // Checks that the two floating point numbers loaded into xmm0 and xmm1 |
| 370 // have int32 values. | 370 // have int32 values. |
| 371 static void CheckSSE2OperandsAreInt32(MacroAssembler* masm, | 371 static void CheckSSE2OperandsAreInt32(MacroAssembler* masm, |
| 372 Label* non_int32, | 372 Label* non_int32, |
| 373 Register scratch); | 373 Register scratch); |
| 374 }; | 374 }; |
| 375 | 375 |
| 376 | 376 |
| 377 // Get the integer part of a heap number. Surprisingly, all this bit twiddling |
| 378 // is faster than using the built-in instructions on floating point registers. |
| 379 // Trashes edi and ebx. Dest is ecx. Source cannot be ecx or one of the |
| 380 // trashed registers. |
| 381 static void IntegerConvert(MacroAssembler* masm, |
| 382 Register source, |
| 383 TypeInfo type_info, |
| 384 bool use_sse3, |
| 385 Label* conversion_failure) { |
| 386 ASSERT(!source.is(ecx) && !source.is(edi) && !source.is(ebx)); |
| 387 Label done, right_exponent, normal_exponent; |
| 388 Register scratch = ebx; |
| 389 Register scratch2 = edi; |
| 390 if (type_info.IsInteger32() && CpuFeatures::IsSupported(SSE2)) { |
| 391 CpuFeatures::Scope scope(SSE2); |
| 392 __ cvttsd2si(ecx, FieldOperand(source, HeapNumber::kValueOffset)); |
| 393 return; |
| 394 } |
| 395 if (!type_info.IsInteger32() || !use_sse3) { |
| 396 // Get exponent word. |
| 397 __ mov(scratch, FieldOperand(source, HeapNumber::kExponentOffset)); |
| 398 // Get exponent alone in scratch2. |
| 399 __ mov(scratch2, scratch); |
| 400 __ and_(scratch2, HeapNumber::kExponentMask); |
| 401 } |
| 402 if (use_sse3) { |
| 403 CpuFeatures::Scope scope(SSE3); |
| 404 if (!type_info.IsInteger32()) { |
| 405 // Check whether the exponent is too big for a 64 bit signed integer. |
| 406 static const uint32_t kTooBigExponent = |
| 407 (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift; |
| 408 __ cmp(Operand(scratch2), Immediate(kTooBigExponent)); |
| 409 __ j(greater_equal, conversion_failure); |
| 410 } |
| 411 // Load x87 register with heap number. |
| 412 __ fld_d(FieldOperand(source, HeapNumber::kValueOffset)); |
| 413 // Reserve space for 64 bit answer. |
| 414 __ sub(Operand(esp), Immediate(sizeof(uint64_t))); // Nolint. |
| 415 // Do conversion, which cannot fail because we checked the exponent. |
| 416 __ fisttp_d(Operand(esp, 0)); |
| 417 __ mov(ecx, Operand(esp, 0)); // Load low word of answer into ecx. |
| 418 __ add(Operand(esp), Immediate(sizeof(uint64_t))); // Nolint. |
| 419 } else { |
| 420 // Load ecx with zero. We use this either for the final shift or |
| 421 // for the answer. |
| 422 __ xor_(ecx, Operand(ecx)); |
| 423 // Check whether the exponent matches a 32 bit signed int that cannot be |
| 424 // represented by a Smi. A non-smi 32 bit integer is 1.xxx * 2^30 so the |
| 425 // exponent is 30 (biased). This is the exponent that we are fastest at and |
| 426 // also the highest exponent we can handle here. |
| 427 const uint32_t non_smi_exponent = |
| 428 (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift; |
| 429 __ cmp(Operand(scratch2), Immediate(non_smi_exponent)); |
| 430 // If we have a match of the int32-but-not-Smi exponent then skip some |
| 431 // logic. |
| 432 __ j(equal, &right_exponent); |
| 433 // If the exponent is higher than that then go to slow case. This catches |
| 434 // numbers that don't fit in a signed int32, infinities and NaNs. |
| 435 __ j(less, &normal_exponent); |
| 436 |
| 437 { |
| 438 // Handle a big exponent. The only reason we have this code is that the |
| 439 // >>> operator has a tendency to generate numbers with an exponent of 31. |
| 440 const uint32_t big_non_smi_exponent = |
| 441 (HeapNumber::kExponentBias + 31) << HeapNumber::kExponentShift; |
| 442 __ cmp(Operand(scratch2), Immediate(big_non_smi_exponent)); |
| 443 __ j(not_equal, conversion_failure); |
| 444 // We have the big exponent, typically from >>>. This means the number is |
| 445 // in the range 2^31 to 2^32 - 1. Get the top bits of the mantissa. |
| 446 __ mov(scratch2, scratch); |
| 447 __ and_(scratch2, HeapNumber::kMantissaMask); |
| 448 // Put back the implicit 1. |
| 449 __ or_(scratch2, 1 << HeapNumber::kExponentShift); |
| 450 // Shift up the mantissa bits to take up the space the exponent used to |
| 451 // take. We just orred in the implicit bit so that took care of one and |
| 452 // we want to use the full unsigned range so we subtract 1 bit from the |
| 453 // shift distance. |
| 454 const int big_shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 1; |
| 455 __ shl(scratch2, big_shift_distance); |
| 456 // Get the second half of the double. |
| 457 __ mov(ecx, FieldOperand(source, HeapNumber::kMantissaOffset)); |
| 458 // Shift down 21 bits to get the most significant 11 bits of the low |
| 459 // mantissa word. |
| 460 __ shr(ecx, 32 - big_shift_distance); |
| 461 __ or_(ecx, Operand(scratch2)); |
| 462 // We have the answer in ecx, but we may need to negate it. |
| 463 __ test(scratch, Operand(scratch)); |
| 464 __ j(positive, &done); |
| 465 __ neg(ecx); |
| 466 __ jmp(&done); |
| 467 } |
| 468 |
| 469 __ bind(&normal_exponent); |
| 470 // Exponent word in scratch, exponent part of exponent word in scratch2. |
| 471 // Zero in ecx. |
| 472 // We know the exponent is smaller than 30 (biased). If it is less than |
| 473 // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, i.e. |
| 474 // it rounds to zero. |
| 475 const uint32_t zero_exponent = |
| 476 (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift; |
| 477 __ sub(Operand(scratch2), Immediate(zero_exponent)); |
| 478 // ecx already has a Smi zero. |
| 479 __ j(less, &done); |
| 480 |
| 481 // We have a shifted exponent between 0 and 30 in scratch2. |
| 482 __ shr(scratch2, HeapNumber::kExponentShift); |
| 483 __ mov(ecx, Immediate(30)); |
| 484 __ sub(ecx, Operand(scratch2)); |
| 485 |
| 486 __ bind(&right_exponent); |
| 487 // Here ecx is the shift, scratch is the exponent word. |
| 488 // Get the top bits of the mantissa. |
| 489 __ and_(scratch, HeapNumber::kMantissaMask); |
| 490 // Put back the implicit 1. |
| 491 __ or_(scratch, 1 << HeapNumber::kExponentShift); |
| 492 // Shift up the mantissa bits to take up the space the exponent used to |
| 493 // take. We have kExponentShift + 1 significant bits in the low end of the |
| 494 // word. Shift them to the top bits. |
| 495 const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2; |
| 496 __ shl(scratch, shift_distance); |
| 497 // Get the second half of the double. For some exponents we don't |
| 498 // actually need this because the bits get shifted out again, but |
| 499 // it's probably slower to test than just to do it. |
| 500 __ mov(scratch2, FieldOperand(source, HeapNumber::kMantissaOffset)); |
| 501 // Shift down 22 bits to get the most significant 10 bits of the low |
| 502 // mantissa word. |
| 503 __ shr(scratch2, 32 - shift_distance); |
| 504 __ or_(scratch2, Operand(scratch)); |
| 505 // Move down according to the exponent. |
| 506 __ shr_cl(scratch2); |
| 507 // Now the unsigned answer is in scratch2. We need to move it to ecx and |
| 508 // we may need to fix the sign. |
| 509 NearLabel negative; |
| 510 __ xor_(ecx, Operand(ecx)); |
| 511 __ cmp(ecx, FieldOperand(source, HeapNumber::kExponentOffset)); |
| 512 __ j(greater, &negative); |
| 513 __ mov(ecx, scratch2); |
| 514 __ jmp(&done); |
| 515 __ bind(&negative); |
| 516 __ sub(ecx, Operand(scratch2)); |
| 517 __ bind(&done); |
| 518 } |
| 519 } |
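Reviewer note: the bit-twiddling in IntegerConvert above is easier to check against a plain C++ sketch of the same conversion. This is purely illustrative (the function name, the little-endian IEEE-754 layout assumption, and the failure convention are mine, not part of this patch); it mirrors the non-SSE3 path: exponents above 31 go to conversion_failure, exponents below 0 round to zero, and everything in between is extracted by shifting the 53-bit mantissa.

    // Host-side sketch only; not V8 code. Assumes a standard IEEE-754 double.
    #include <cstdint>
    #include <cstring>

    // Returns false where the stub jumps to conversion_failure
    // (|value| >= 2^32, infinities, NaNs).
    static bool IntegerConvertSketch(double value, int32_t* out) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof(bits));
      int exponent = static_cast<int>((bits >> 52) & 0x7FF) - 1023;  // unbias
      if (exponent < 0) { *out = 0; return true; }   // rounds to zero
      if (exponent > 31) return false;               // too big for this path
      // Reassemble the mantissa with the implicit leading 1 (53 bits total).
      uint64_t mantissa = (bits & 0x000FFFFFFFFFFFFFull) | (1ull << 52);
      uint32_t magnitude = static_cast<uint32_t>(mantissa >> (52 - exponent));
      // Negate via unsigned arithmetic, as the stub does with neg/sub.
      uint32_t result = (bits >> 63) ? 0u - magnitude : magnitude;
      *out = static_cast<int32_t>(result);
      return true;
    }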
| 520 |
| 521 |
| 522 Handle<Code> GetTypeRecordingUnaryOpStub(int key, |
| 523 TRUnaryOpIC::TypeInfo type_info) { |
| 524 TypeRecordingUnaryOpStub stub(key, type_info); |
| 525 return stub.GetCode(); |
| 526 } |
| 527 |
| 528 |
| 529 const char* TypeRecordingUnaryOpStub::GetName() { |
| 530 if (name_ != NULL) return name_; |
| 531 const int kMaxNameLength = 100; |
| 532 name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray( |
| 533 kMaxNameLength); |
| 534 if (name_ == NULL) return "OOM"; |
| 535 const char* op_name = Token::Name(op_); |
| 536 const char* overwrite_name; |
| 537 switch (mode_) { |
| 538 case UNARY_NO_OVERWRITE: overwrite_name = "Alloc"; break; |
| 539 case UNARY_OVERWRITE: overwrite_name = "Overwrite"; break; |
| 540 } |
| 541 |
| 542 OS::SNPrintF(Vector<char>(name_, kMaxNameLength), |
| 543 "TypeRecordingUnaryOpStub_%s_%s_%s", |
| 544 op_name, |
| 545 overwrite_name, |
| 546 TRUnaryOpIC::GetName(operand_type_)); |
| 547 return name_; |
| 548 } |
| 549 |
| 550 |
| 551 // TODO(svenpanne): Use virtual functions instead of switch. |
| 552 void TypeRecordingUnaryOpStub::Generate(MacroAssembler* masm) { |
| 553 switch (operand_type_) { |
| 554 case TRUnaryOpIC::UNINITIALIZED: |
| 555 GenerateTypeTransition(masm); |
| 556 break; |
| 557 case TRUnaryOpIC::SMI: |
| 558 GenerateSmiStub(masm); |
| 559 break; |
| 560 case TRUnaryOpIC::HEAP_NUMBER: |
| 561 GenerateHeapNumberStub(masm); |
| 562 break; |
| 563 case TRUnaryOpIC::GENERIC: |
| 564 GenerateGenericStub(masm); |
| 565 break; |
| 566 } |
| 567 } |
| 568 |
| 569 |
| 570 void TypeRecordingUnaryOpStub::GenerateTypeTransition(MacroAssembler* masm) { |
| 571 __ pop(ecx); // Save return address. |
| 572 __ push(eax); |
| 573 // The argument is now on top. |
| 574 // Push this stub's key. Although the operation and the type info are |
| 575 // encoded into the key, the encoding is opaque, so push them too. |
| 576 __ push(Immediate(Smi::FromInt(MinorKey()))); |
| 577 __ push(Immediate(Smi::FromInt(op_))); |
| 578 __ push(Immediate(Smi::FromInt(operand_type_))); |
| 579 |
| 580 __ push(ecx); // Push return address. |
| 581 |
| 582 // Patch the caller to an appropriate specialized stub and return the |
| 583 // operation result to the caller of the stub. |
| 584 __ TailCallExternalReference( |
| 585 ExternalReference(IC_Utility(IC::kTypeRecordingUnaryOp_Patch), |
| 586 masm->isolate()), |
| 587 4, |
| 588 1); |
| 589 } |
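For reference, the transition stub leaves the stack laid out as sketched below (top first): the four pushed values are exactly the arguments the 4-argument IC utility expects, with the return address restored on top. The offsets are illustrative, not taken from this patch.

    // Stack right before TailCallExternalReference (esp grows downward):
    //   [esp +  0]  return address        (re-pushed from ecx)
    //   [esp +  4]  Smi(operand_type_)
    //   [esp +  8]  Smi(op_)
    //   [esp + 12]  Smi(MinorKey())
    //   [esp + 16]  eax                   (the operand being transitioned)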
| 590 |
| 591 |
| 592 // TODO(svenpanne): Use virtual functions instead of switch. |
| 593 void TypeRecordingUnaryOpStub::GenerateSmiStub(MacroAssembler* masm) { |
| 594 switch (op_) { |
| 595 case Token::SUB: |
| 596 GenerateSmiStubSub(masm); |
| 597 break; |
| 598 case Token::BIT_NOT: |
| 599 GenerateSmiStubBitNot(masm); |
| 600 break; |
| 601 default: |
| 602 UNREACHABLE(); |
| 603 } |
| 604 } |
| 605 |
| 606 |
| 607 void TypeRecordingUnaryOpStub::GenerateSmiStubSub(MacroAssembler* masm) { |
| 608 NearLabel non_smi; |
| 609 Label undo, slow; |
| 610 GenerateSmiCodeSub(masm, &non_smi, &undo, &slow); |
| 611 __ bind(&undo); |
| 612 GenerateSmiCodeUndo(masm); |
| 613 __ bind(&non_smi); |
| 614 __ bind(&slow); |
| 615 GenerateTypeTransition(masm); |
| 616 } |
| 617 |
| 618 |
| 619 void TypeRecordingUnaryOpStub::GenerateSmiStubBitNot(MacroAssembler* masm) { |
| 620 NearLabel non_smi; |
| 621 GenerateSmiCodeBitNot(masm, &non_smi); |
| 622 __ bind(&non_smi); |
| 623 GenerateTypeTransition(masm); |
| 624 } |
| 625 |
| 626 |
| 627 void TypeRecordingUnaryOpStub::GenerateSmiCodeSub(MacroAssembler* masm, |
| 628 NearLabel* non_smi, |
| 629 Label* undo, |
| 630 Label* slow) { |
| 631 // Check whether the value is a smi. |
| 632 __ test(eax, Immediate(kSmiTagMask)); |
| 633 __ j(not_zero, non_smi); |
| 634 |
| 635 // We can't handle -0 with smis, so use a type transition for that case. |
| 636 __ test(eax, Operand(eax)); |
| 637 __ j(zero, slow); |
| 638 |
| 639 // Try optimistic subtraction '0 - value', saving operand in eax for undo. |
| 640 __ mov(edx, Operand(eax)); |
| 641 __ Set(eax, Immediate(0)); |
| 642 __ sub(eax, Operand(edx)); |
| 643 __ j(overflow, undo); |
| 644 __ ret(0); |
| 645 } |
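The optimistic '0 - value' trick above relies on two properties of ia32 smis (one tag bit, tag value 0): negating the tagged word negates the untagged value, and the only inputs it cannot handle are 0 (whose result, -0, is not a smi) and the most negative smi (where the subtraction overflows). A hedged host-side sketch, not part of this patch:

    // Illustrative only; assumes the ia32 smi encoding value << 1 with tag 0.
    #include <cstdint>
    #include <optional>

    std::optional<int32_t> NegateTaggedSmi(int32_t tagged) {
      if (tagged == 0) return std::nullopt;            // -0: take the slow path
      int64_t negated = -static_cast<int64_t>(tagged); // widen to spot overflow
      if (negated > INT32_MAX) return std::nullopt;    // undo: operand was the min smi
      return static_cast<int32_t>(negated);            // still a valid tagged smi
    }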
| 646 |
| 647 |
| 648 void TypeRecordingUnaryOpStub::GenerateSmiCodeBitNot(MacroAssembler* masm, |
| 649 NearLabel* non_smi) { |
| 650 // Check whether the value is a smi. |
| 651 __ test(eax, Immediate(kSmiTagMask)); |
| 652 __ j(not_zero, non_smi); |
| 653 |
| 654 // Flip bits and revert inverted smi-tag. |
| 655 __ not_(eax); |
| 656 __ and_(eax, ~kSmiTagMask); |
| 657 __ ret(0); |
| 658 } |
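The two instructions above work because, with a one-bit smi tag of 0, flipping every bit of the tagged word inverts the payload and sets the (previously zero) tag bit; masking the tag bit back off leaves exactly the tagged bitwise-NOT of the value. A small illustrative check, not part of this patch:

    // Illustrative only; assumes the ia32 smi encoding value << 1 with tag 0.
    #include <cassert>
    #include <cstdint>

    void CheckSmiBitNot(int32_t value) {                       // any 31-bit smi payload
      uint32_t tagged  = static_cast<uint32_t>(value) << 1;    // tag bit is 0
      uint32_t flipped = ~tagged & ~1u;                        // what the stub computes
      assert(flipped == (static_cast<uint32_t>(~value) << 1)); // equals the tagged ~value
    }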
| 659 |
| 660 |
| 661 void TypeRecordingUnaryOpStub::GenerateSmiCodeUndo(MacroAssembler* masm) { |
| 662 __ mov(eax, Operand(edx)); |
| 663 } |
| 664 |
| 665 |
| 666 // TODO(svenpanne): Use virtual functions instead of switch. |
| 667 void TypeRecordingUnaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) { |
| 668 switch (op_) { |
| 669 case Token::SUB: |
| 670 GenerateHeapNumberStubSub(masm); |
| 671 break; |
| 672 case Token::BIT_NOT: |
| 673 GenerateHeapNumberStubBitNot(masm); |
| 674 break; |
| 675 default: |
| 676 UNREACHABLE(); |
| 677 } |
| 678 } |
| 679 |
| 680 |
| 681 void TypeRecordingUnaryOpStub::GenerateHeapNumberStubSub(MacroAssembler* masm) { |
| 682 NearLabel non_smi; |
| 683 Label undo, slow; |
| 684 GenerateSmiCodeSub(masm, &non_smi, &undo, &slow); |
| 685 __ bind(&non_smi); |
| 686 GenerateHeapNumberCodeSub(masm, &slow); |
| 687 __ bind(&undo); |
| 688 GenerateSmiCodeUndo(masm); |
| 689 __ bind(&slow); |
| 690 GenerateTypeTransition(masm); |
| 691 } |
| 692 |
| 693 |
| 694 void TypeRecordingUnaryOpStub::GenerateHeapNumberStubBitNot( |
| 695 MacroAssembler* masm) { |
| 696 NearLabel non_smi; |
| 697 Label slow; |
| 698 GenerateSmiCodeBitNot(masm, &non_smi); |
| 699 __ bind(&non_smi); |
| 700 GenerateHeapNumberCodeBitNot(masm, &slow); |
| 701 __ bind(&slow); |
| 702 GenerateTypeTransition(masm); |
| 703 } |
| 704 |
| 705 |
| 706 void TypeRecordingUnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm, |
| 707 Label* slow) { |
| 708 __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset)); |
| 709 __ cmp(edx, masm->isolate()->factory()->heap_number_map()); |
| 710 __ j(not_equal, slow); |
| 711 |
| 712 if (mode_ == UNARY_OVERWRITE) { |
| 713 __ mov(edx, FieldOperand(eax, HeapNumber::kExponentOffset)); |
| 714 __ xor_(edx, HeapNumber::kSignMask); // Flip sign. |
| 715 __ mov(FieldOperand(eax, HeapNumber::kExponentOffset), edx); |
| 716 } else { |
| 717 __ mov(edx, Operand(eax)); |
| 718 // edx: operand |
| 719 |
| 720 Label slow_allocate_heapnumber, heapnumber_allocated; |
| 721 __ AllocateHeapNumber(eax, ebx, ecx, &slow_allocate_heapnumber); |
| 722 __ jmp(&heapnumber_allocated); |
| 723 |
| 724 __ bind(&slow_allocate_heapnumber); |
| 725 __ EnterInternalFrame(); |
| 726 __ push(edx); |
| 727 __ CallRuntime(Runtime::kNumberAlloc, 0); |
| 728 __ pop(edx); |
| 729 __ LeaveInternalFrame(); |
| 730 |
| 731 __ bind(&heapnumber_allocated); |
| 732 // eax: allocated 'empty' number |
| 733 __ mov(ecx, FieldOperand(edx, HeapNumber::kExponentOffset)); |
| 734 __ xor_(ecx, HeapNumber::kSignMask); // Flip sign. |
| 735 __ mov(FieldOperand(eax, HeapNumber::kExponentOffset), ecx); |
| 736 __ mov(ecx, FieldOperand(edx, HeapNumber::kMantissaOffset)); |
| 737 __ mov(FieldOperand(eax, HeapNumber::kMantissaOffset), ecx); |
| 738 } |
| 739 __ ret(0); |
| 740 } |
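Both branches above implement unary minus by flipping only the IEEE-754 sign bit in the heap number's high (exponent) word, which negates any double, including 0.0, infinities and NaNs, without touching the mantissa. A sketch of the same operation on a host double (illustrative only, not part of this patch):

    // Illustrative only; assumes a standard IEEE-754 double layout.
    #include <cstdint>
    #include <cstring>

    double NegateByBitFlip(double value) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof(bits));
      bits ^= 1ull << 63;    // the sign bit: HeapNumber::kSignMask in the high word
      std::memcpy(&value, &bits, sizeof(bits));
      return value;
    }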
| 741 |
| 742 |
| 743 void TypeRecordingUnaryOpStub::GenerateHeapNumberCodeBitNot( |
| 744 MacroAssembler* masm, |
| 745 Label* slow) { |
| 746 __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset)); |
| 747 __ cmp(edx, masm->isolate()->factory()->heap_number_map()); |
| 748 __ j(not_equal, slow); |
| 749 |
| 750 // Convert the heap number in eax to an untagged integer in ecx. |
| 751 IntegerConvert(masm, eax, TypeInfo::Unknown(), CpuFeatures::IsSupported(SSE3), |
| 752 slow); |
| 753 |
| 754 // Do the bitwise operation and check if the result fits in a smi. |
| 755 NearLabel try_float; |
| 756 __ not_(ecx); |
| 757 __ cmp(ecx, 0xc0000000); |
| 758 __ j(sign, &try_float); |
| 759 |
| 760 // Tag the result as a smi and we're done. |
| 761 STATIC_ASSERT(kSmiTagSize == 1); |
| 762 __ lea(eax, Operand(ecx, times_2, kSmiTag)); |
| 763 __ ret(0); |
| 764 |
| 765 // Try to store the result in a heap number. |
| 766 __ bind(&try_float); |
| 767 if (mode_ == UNARY_NO_OVERWRITE) { |
| 768 Label slow_allocate_heapnumber, heapnumber_allocated; |
| 769 __ AllocateHeapNumber(eax, edx, edi, &slow_allocate_heapnumber); |
| 770 __ jmp(&heapnumber_allocated); |
| 771 |
| 772 __ bind(&slow_allocate_heapnumber); |
| 773 __ EnterInternalFrame(); |
| 774 __ push(ecx); |
| 775 __ CallRuntime(Runtime::kNumberAlloc, 0); |
| 776 __ pop(ecx); |
| 777 __ LeaveInternalFrame(); |
| 778 |
| 779 __ bind(&heapnumber_allocated); |
| 780 } |
| 781 if (CpuFeatures::IsSupported(SSE2)) { |
| 782 CpuFeatures::Scope use_sse2(SSE2); |
| 783 __ cvtsi2sd(xmm0, Operand(ecx)); |
| 784 __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0); |
| 785 } else { |
| 786 __ push(ecx); |
| 787 __ fild_s(Operand(esp, 0)); |
| 788 __ pop(ecx); |
| 789 __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset)); |
| 790 } |
| 791 __ ret(0); |
| 792 } |
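The cmp(ecx, 0xc0000000) / j(sign, &try_float) pair above is the usual smi-range test: a 32-bit result fits in a 31-bit smi exactly when adding 2^30 leaves bit 31 clear, and cmp against 0xc0000000 produces that same sum's top bit in the sign flag. An illustrative restatement, not part of this patch:

    // Illustrative only; a 31-bit smi holds values in [-2^30, 2^30).
    #include <cstdint>

    bool FitsInSmi(int32_t value) {
      return ((static_cast<uint32_t>(value) + 0x40000000u) & 0x80000000u) == 0;
    }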
| 793 |
| 794 |
| 795 // TODO(svenpanne): Use virtual functions instead of switch. |
| 796 void TypeRecordingUnaryOpStub::GenerateGenericStub(MacroAssembler* masm) { |
| 797 switch (op_) { |
| 798 case Token::SUB: |
| 799 GenerateGenericStubSub(masm); |
| 800 break; |
| 801 case Token::BIT_NOT: |
| 802 GenerateGenericStubBitNot(masm); |
| 803 break; |
| 804 default: |
| 805 UNREACHABLE(); |
| 806 } |
| 807 } |
| 808 |
| 809 |
| 810 void TypeRecordingUnaryOpStub::GenerateGenericStubSub(MacroAssembler* masm) { |
| 811 NearLabel non_smi; |
| 812 Label undo, slow; |
| 813 GenerateSmiCodeSub(masm, &non_smi, &undo, &slow); |
| 814 __ bind(&non_smi); |
| 815 GenerateHeapNumberCodeSub(masm, &slow); |
| 816 __ bind(&undo); |
| 817 GenerateSmiCodeUndo(masm); |
| 818 __ bind(&slow); |
| 819 GenerateGenericCodeFallback(masm); |
| 820 } |
| 821 |
| 822 |
| 823 void TypeRecordingUnaryOpStub::GenerateGenericStubBitNot(MacroAssembler* masm) { |
| 824 NearLabel non_smi; |
| 825 Label slow; |
| 826 GenerateSmiCodeBitNot(masm, &non_smi); |
| 827 __ bind(&non_smi); |
| 828 GenerateHeapNumberCodeBitNot(masm, &slow); |
| 829 __ bind(&slow); |
| 830 GenerateGenericCodeFallback(masm); |
| 831 } |
| 832 |
| 833 |
| 834 void TypeRecordingUnaryOpStub::GenerateGenericCodeFallback( |
| 835 MacroAssembler* masm) { |
| 836 // Handle the slow case by jumping to the corresponding JavaScript builtin. |
| 837 __ pop(ecx); // pop return address. |
| 838 __ push(eax); |
| 839 __ push(ecx); // push return address |
| 840 switch (op_) { |
| 841 case Token::SUB: |
| 842 __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION); |
| 843 break; |
| 844 case Token::BIT_NOT: |
| 845 __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION); |
| 846 break; |
| 847 default: |
| 848 UNREACHABLE(); |
| 849 } |
| 850 } |
| 851 |
| 852 |
| 377 Handle<Code> GetTypeRecordingBinaryOpStub(int key, | 853 Handle<Code> GetTypeRecordingBinaryOpStub(int key, |
| 378 TRBinaryOpIC::TypeInfo type_info, | 854 TRBinaryOpIC::TypeInfo type_info, |
| 379 TRBinaryOpIC::TypeInfo result_type_info) { | 855 TRBinaryOpIC::TypeInfo result_type_info) { |
| 380 TypeRecordingBinaryOpStub stub(key, type_info, result_type_info); | 856 TypeRecordingBinaryOpStub stub(key, type_info, result_type_info); |
| 381 return stub.GetCode(); | 857 return stub.GetCode(); |
| 382 } | 858 } |
| 383 | 859 |
| 384 | 860 |
| 385 void TypeRecordingBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) { | 861 void TypeRecordingBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) { |
| 386 __ pop(ecx); // Save return address. | 862 __ pop(ecx); // Save return address. |
| (...skipping 1540 matching lines...) |
| 1927 __ bind(&done); | 2403 __ bind(&done); |
| 1928 } else { | 2404 } else { |
| 1929 ASSERT(type_ == TranscendentalCache::LOG); | 2405 ASSERT(type_ == TranscendentalCache::LOG); |
| 1930 __ fldln2(); | 2406 __ fldln2(); |
| 1931 __ fxch(); | 2407 __ fxch(); |
| 1932 __ fyl2x(); | 2408 __ fyl2x(); |
| 1933 } | 2409 } |
| 1934 } | 2410 } |
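For context on the unchanged LOG branch shown above: fldln2 pushes ln 2, fxch swaps it beneath the argument, and fyl2x computes st1 * log2(st0), so the net result is ln(x) = ln 2 * log2(x). (Noted only to make the x87 sequence readable; this code is not modified by the patch.)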
| 1935 | 2411 |
| 1936 | 2412 |
| 1937 // Get the integer part of a heap number. Surprisingly, all this bit twiddling | |
| 1938 // is faster than using the built-in instructions on floating point registers. | |
| 1939 // Trashes edi and ebx. Dest is ecx. Source cannot be ecx or one of the | |
| 1940 // trashed registers. | |
| 1941 void IntegerConvert(MacroAssembler* masm, | |
| 1942 Register source, | |
| 1943 TypeInfo type_info, | |
| 1944 bool use_sse3, | |
| 1945 Label* conversion_failure) { | |
| 1946 ASSERT(!source.is(ecx) && !source.is(edi) && !source.is(ebx)); | |
| 1947 Label done, right_exponent, normal_exponent; | |
| 1948 Register scratch = ebx; | |
| 1949 Register scratch2 = edi; | |
| 1950 if (type_info.IsInteger32() && CpuFeatures::IsSupported(SSE2)) { | |
| 1951 CpuFeatures::Scope scope(SSE2); | |
| 1952 __ cvttsd2si(ecx, FieldOperand(source, HeapNumber::kValueOffset)); | |
| 1953 return; | |
| 1954 } | |
| 1955 if (!type_info.IsInteger32() || !use_sse3) { | |
| 1956 // Get exponent word. | |
| 1957 __ mov(scratch, FieldOperand(source, HeapNumber::kExponentOffset)); | |
| 1958 // Get exponent alone in scratch2. | |
| 1959 __ mov(scratch2, scratch); | |
| 1960 __ and_(scratch2, HeapNumber::kExponentMask); | |
| 1961 } | |
| 1962 if (use_sse3) { | |
| 1963 CpuFeatures::Scope scope(SSE3); | |
| 1964 if (!type_info.IsInteger32()) { | |
| 1965 // Check whether the exponent is too big for a 64 bit signed integer. | |
| 1966 static const uint32_t kTooBigExponent = | |
| 1967 (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift; | |
| 1968 __ cmp(Operand(scratch2), Immediate(kTooBigExponent)); | |
| 1969 __ j(greater_equal, conversion_failure); | |
| 1970 } | |
| 1971 // Load x87 register with heap number. | |
| 1972 __ fld_d(FieldOperand(source, HeapNumber::kValueOffset)); | |
| 1973 // Reserve space for 64 bit answer. | |
| 1974 __ sub(Operand(esp), Immediate(sizeof(uint64_t))); // Nolint. | |
| 1975 // Do conversion, which cannot fail because we checked the exponent. | |
| 1976 __ fisttp_d(Operand(esp, 0)); | |
| 1977 __ mov(ecx, Operand(esp, 0)); // Load low word of answer into ecx. | |
| 1978 __ add(Operand(esp), Immediate(sizeof(uint64_t))); // Nolint. | |
| 1979 } else { | |
| 1980 // Load ecx with zero. We use this either for the final shift or | |
| 1981 // for the answer. | |
| 1982 __ xor_(ecx, Operand(ecx)); | |
| 1983 // Check whether the exponent matches a 32 bit signed int that cannot be | |
| 1984 // represented by a Smi. A non-smi 32 bit integer is 1.xxx * 2^30 so the | |
| 1985 // exponent is 30 (biased). This is the exponent that we are fastest at and | |
| 1986 // also the highest exponent we can handle here. | |
| 1987 const uint32_t non_smi_exponent = | |
| 1988 (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift; | |
| 1989 __ cmp(Operand(scratch2), Immediate(non_smi_exponent)); | |
| 1990 // If we have a match of the int32-but-not-Smi exponent then skip some | |
| 1991 // logic. | |
| 1992 __ j(equal, &right_exponent); | |
| 1993 // If the exponent is higher than that then go to slow case. This catches | |
| 1994 // numbers that don't fit in a signed int32, infinities and NaNs. | |
| 1995 __ j(less, &normal_exponent); | |
| 1996 | |
| 1997 { | |
| 1998 // Handle a big exponent. The only reason we have this code is that the | |
| 1999 // >>> operator has a tendency to generate numbers with an exponent of 31. | |
| 2000 const uint32_t big_non_smi_exponent = | |
| 2001 (HeapNumber::kExponentBias + 31) << HeapNumber::kExponentShift; | |
| 2002 __ cmp(Operand(scratch2), Immediate(big_non_smi_exponent)); | |
| 2003 __ j(not_equal, conversion_failure); | |
| 2004 // We have the big exponent, typically from >>>. This means the number is | |
| 2005 // in the range 2^31 to 2^32 - 1. Get the top bits of the mantissa. | |
| 2006 __ mov(scratch2, scratch); | |
| 2007 __ and_(scratch2, HeapNumber::kMantissaMask); | |
| 2008 // Put back the implicit 1. | |
| 2009 __ or_(scratch2, 1 << HeapNumber::kExponentShift); | |
| 2010 // Shift up the mantissa bits to take up the space the exponent used to | |
| 2011 // take. We just orred in the implicit bit so that took care of one and | |
| 2012 // we want to use the full unsigned range so we subtract 1 bit from the | |
| 2013 // shift distance. | |
| 2014 const int big_shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 1; | |
| 2015 __ shl(scratch2, big_shift_distance); | |
| 2016 // Get the second half of the double. | |
| 2017 __ mov(ecx, FieldOperand(source, HeapNumber::kMantissaOffset)); | |
| 2018 // Shift down 21 bits to get the most significant 11 bits or the low | |
| 2019 // mantissa word. | |
| 2020 __ shr(ecx, 32 - big_shift_distance); | |
| 2021 __ or_(ecx, Operand(scratch2)); | |
| 2022 // We have the answer in ecx, but we may need to negate it. | |
| 2023 __ test(scratch, Operand(scratch)); | |
| 2024 __ j(positive, &done); | |
| 2025 __ neg(ecx); | |
| 2026 __ jmp(&done); | |
| 2027 } | |
| 2028 | |
| 2029 __ bind(&normal_exponent); | |
| 2030 // Exponent word in scratch, exponent part of exponent word in scratch2. | |
| 2031 // Zero in ecx. | |
| 2032 // We know the exponent is smaller than 30 (biased). If it is less than | |
| 2033 // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, ie | |
| 2034 // it rounds to zero. | |
| 2035 const uint32_t zero_exponent = | |
| 2036 (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift; | |
| 2037 __ sub(Operand(scratch2), Immediate(zero_exponent)); | |
| 2038 // ecx already has a Smi zero. | |
| 2039 __ j(less, &done); | |
| 2040 | |
| 2041 // We have a shifted exponent between 0 and 30 in scratch2. | |
| 2042 __ shr(scratch2, HeapNumber::kExponentShift); | |
| 2043 __ mov(ecx, Immediate(30)); | |
| 2044 __ sub(ecx, Operand(scratch2)); | |
| 2045 | |
| 2046 __ bind(&right_exponent); | |
| 2047 // Here ecx is the shift, scratch is the exponent word. | |
| 2048 // Get the top bits of the mantissa. | |
| 2049 __ and_(scratch, HeapNumber::kMantissaMask); | |
| 2050 // Put back the implicit 1. | |
| 2051 __ or_(scratch, 1 << HeapNumber::kExponentShift); | |
| 2052 // Shift up the mantissa bits to take up the space the exponent used to | |
| 2053 // take. We have kExponentShift + 1 significant bits int he low end of the | |
| 2054 // word. Shift them to the top bits. | |
| 2055 const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2; | |
| 2056 __ shl(scratch, shift_distance); | |
| 2057 // Get the second half of the double. For some exponents we don't | |
| 2058 // actually need this because the bits get shifted out again, but | |
| 2059 // it's probably slower to test than just to do it. | |
| 2060 __ mov(scratch2, FieldOperand(source, HeapNumber::kMantissaOffset)); | |
| 2061 // Shift down 22 bits to get the most significant 10 bits or the low | |
| 2062 // mantissa word. | |
| 2063 __ shr(scratch2, 32 - shift_distance); | |
| 2064 __ or_(scratch2, Operand(scratch)); | |
| 2065 // Move down according to the exponent. | |
| 2066 __ shr_cl(scratch2); | |
| 2067 // Now the unsigned answer is in scratch2. We need to move it to ecx and | |
| 2068 // we may need to fix the sign. | |
| 2069 NearLabel negative; | |
| 2070 __ xor_(ecx, Operand(ecx)); | |
| 2071 __ cmp(ecx, FieldOperand(source, HeapNumber::kExponentOffset)); | |
| 2072 __ j(greater, &negative); | |
| 2073 __ mov(ecx, scratch2); | |
| 2074 __ jmp(&done); | |
| 2075 __ bind(&negative); | |
| 2076 __ sub(ecx, Operand(scratch2)); | |
| 2077 __ bind(&done); | |
| 2078 } | |
| 2079 } | |
| 2080 | |
| 2081 | |
| 2082 // Input: edx, eax are the left and right objects of a bit op. | 2413 // Input: edx, eax are the left and right objects of a bit op. |
| 2083 // Output: eax, ecx are left and right integers for a bit op. | 2414 // Output: eax, ecx are left and right integers for a bit op. |
| 2084 void FloatingPointHelper::LoadNumbersAsIntegers(MacroAssembler* masm, | 2415 void FloatingPointHelper::LoadNumbersAsIntegers(MacroAssembler* masm, |
| 2085 TypeInfo type_info, | 2416 TypeInfo type_info, |
| 2086 bool use_sse3, | 2417 bool use_sse3, |
| 2087 Label* conversion_failure) { | 2418 Label* conversion_failure) { |
| 2088 // Check float operands. | 2419 // Check float operands. |
| 2089 Label arg1_is_object, check_undefined_arg1; | 2420 Label arg1_is_object, check_undefined_arg1; |
| 2090 Label arg2_is_object, check_undefined_arg2; | 2421 Label arg2_is_object, check_undefined_arg2; |
| 2091 Label load_arg2, done; | 2422 Label load_arg2, done; |
| (...skipping 3569 matching lines...) |
| 5661 // Do a tail call to the rewritten stub. | 5992 // Do a tail call to the rewritten stub. |
| 5662 __ jmp(Operand(edi)); | 5993 __ jmp(Operand(edi)); |
| 5663 } | 5994 } |
| 5664 | 5995 |
| 5665 | 5996 |
| 5666 #undef __ | 5997 #undef __ |
| 5667 | 5998 |
| 5668 } } // namespace v8::internal | 5999 } } // namespace v8::internal |
| 5669 | 6000 |
| 5670 #endif // V8_TARGET_ARCH_IA32 | 6001 #endif // V8_TARGET_ARCH_IA32 |
| OLD | NEW |