Chromium Code Reviews

Unified Diff: src/x64/code-stubs-x64.cc

Issue 25494007: Reland "Hydrogenisation of binops" (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: rebase Created 7 years, 2 months ago
 // Copyright 2013 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 137 matching lines...)
     Isolate* isolate,
     CodeStubInterfaceDescriptor* descriptor) {
   static Register registers[] = { rax, rbx };
   descriptor->register_param_count_ = 2;
   descriptor->register_params_ = registers;
   descriptor->deoptimization_handler_ =
       Runtime::FunctionForId(Runtime::kTransitionElementsKind)->entry;
 }
 
 
+void BinaryOpStub::InitializeInterfaceDescriptor(
+    Isolate* isolate,
+    CodeStubInterfaceDescriptor* descriptor) {
+  static Register registers[] = { rdx, rax };
+  descriptor->register_param_count_ = 2;
+  descriptor->register_params_ = registers;
+  descriptor->deoptimization_handler_ = FUNCTION_ADDR(BinaryOpIC_Miss);
+  descriptor->SetMissHandler(
+      ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate));
+}
+
+
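This hunk is the core of the reland: BinaryOpStub becomes a Hydrogen stub, so the platform file now only declares the stub's register interface (left operand in rdx, right in rax) and its miss handler instead of hand-written assembly. A minimal standalone sketch of what such a descriptor records, using made-up types rather than V8's real CodeStubInterfaceDescriptor:

// Illustrative stand-ins only -- not V8's actual descriptor types.
enum Register { rax, rbx, rcx, rdx };
typedef const void* Address;

struct InterfaceDescriptor {
  const Register* register_params;   // registers holding the stub's inputs
  int register_param_count;
  Address deoptimization_handler;    // runtime fallback used on a miss/deopt
};

// Mirrors the shape of the InitializeInterfaceDescriptor hunk above:
// the binop stub takes its left operand in rdx and right operand in rax.
void DescribeBinaryOp(InterfaceDescriptor* d, Address miss_handler) {
  static const Register kParams[] = { rdx, rax };
  d->register_params = kParams;
  d->register_param_count = 2;
  d->deoptimization_handler = miss_handler;
}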
 static void InitializeArrayConstructorDescriptor(
     Isolate* isolate,
     CodeStubInterfaceDescriptor* descriptor,
     int constant_stack_parameter_count) {
   // register state
   // rax -- number of arguments
   // rdi -- function
   // rbx -- type info cell with elements kind
   static Register registers[] = { rdi, rbx };
   descriptor->register_param_count_ = 2;
(...skipping 272 matching lines...)
 class FloatingPointHelper : public AllStatic {
  public:
   enum ConvertUndefined {
     CONVERT_UNDEFINED_TO_ZERO,
     BAILOUT_ON_UNDEFINED
   };
   // Load the operands from rdx and rax into xmm0 and xmm1, as doubles.
   // If the operands are not both numbers, jump to not_numbers.
   // Leaves rdx and rax unchanged. SmiOperands assumes both are smis.
   // NumberOperands assumes both are smis or heap numbers.
-  static void LoadSSE2SmiOperands(MacroAssembler* masm);
   static void LoadSSE2UnknownOperands(MacroAssembler* masm,
                                       Label* not_numbers);
-
-  // Takes the operands in rdx and rax and loads them as integers in rax
-  // and rcx.
-  static void LoadAsIntegers(MacroAssembler* masm,
-                             Label* operand_conversion_failure,
-                             Register heap_number_map);
-
-  // Tries to convert two values to smis losslessly.
-  // This fails if either argument is not a Smi nor a HeapNumber,
-  // or if it's a HeapNumber with a value that can't be converted
-  // losslessly to a Smi. In that case, control transitions to the
-  // on_not_smis label.
-  // On success, either control goes to the on_success label (if one is
-  // provided), or it falls through at the end of the code (if on_success
-  // is NULL).
-  // On success, both first and second holds Smi tagged values.
-  // One of first or second must be non-Smi when entering.
-  static void NumbersToSmis(MacroAssembler* masm,
-                            Register first,
-                            Register second,
-                            Register scratch1,
-                            Register scratch2,
-                            Register scratch3,
-                            Label* on_success,
-                            Label* on_not_smis,
-                            ConvertUndefined convert_undefined);
 };
 
 
 void DoubleToIStub::Generate(MacroAssembler* masm) {
   Register input_reg = this->source();
   Register final_result_reg = this->destination();
   ASSERT(is_truncating());
 
   Label check_negative, process_64_bits, done;
 
(...skipping 67 matching lines...)
   if (!final_result_reg.is(result_reg)) {
     ASSERT(final_result_reg.is(rcx));
     __ movl(final_result_reg, result_reg);
   }
   __ pop(save_reg);
   __ pop(scratch1);
   __ ret(0);
 }
 
 
-void BinaryOpStub::Initialize() {}
-
-
-void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
-  __ PopReturnAddressTo(rcx);
-  __ push(rdx);
-  __ push(rax);
-  // Left and right arguments are now on top.
-  __ Push(Smi::FromInt(MinorKey()));
-
-  __ PushReturnAddressFrom(rcx);
-
-  // Patch the caller to an appropriate specialized stub and return the
-  // operation result to the caller of the stub.
-  __ TailCallExternalReference(
-      ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
-                        masm->isolate()),
-      3,
-      1);
-}
-
-
-static void BinaryOpStub_GenerateSmiCode(
-    MacroAssembler* masm,
-    Label* slow,
-    BinaryOpStub::SmiCodeGenerateHeapNumberResults allow_heapnumber_results,
-    Token::Value op) {
-
-  // Arguments to BinaryOpStub are in rdx and rax.
-  const Register left = rdx;
-  const Register right = rax;
-
-  // We only generate heapnumber answers for overflowing calculations
-  // for the four basic arithmetic operations and logical right shift by 0.
-  bool generate_inline_heapnumber_results =
-      (allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS) &&
-      (op == Token::ADD || op == Token::SUB ||
-       op == Token::MUL || op == Token::DIV || op == Token::SHR);
-
-  // Smi check of both operands. If op is BIT_OR, the check is delayed
-  // until after the OR operation.
-  Label not_smis;
-  Label use_fp_on_smis;
-  Label fail;
-
-  if (op != Token::BIT_OR) {
-    Comment smi_check_comment(masm, "-- Smi check arguments");
-    __ JumpIfNotBothSmi(left, right, &not_smis);
-  }
-
-  Label smi_values;
-  __ bind(&smi_values);
-  // Perform the operation.
-  Comment perform_smi(masm, "-- Perform smi operation");
-  switch (op) {
-    case Token::ADD:
-      ASSERT(right.is(rax));
-      __ SmiAdd(right, right, left, &use_fp_on_smis);  // ADD is commutative.
-      break;
-
-    case Token::SUB:
-      __ SmiSub(left, left, right, &use_fp_on_smis);
-      __ movq(rax, left);
-      break;
-
-    case Token::MUL:
-      ASSERT(right.is(rax));
-      __ SmiMul(right, right, left, &use_fp_on_smis);  // MUL is commutative.
-      break;
-
-    case Token::DIV:
-      // SmiDiv will not accept left in rdx or right in rax.
-      __ movq(rbx, rax);
-      __ movq(rcx, rdx);
-      __ SmiDiv(rax, rcx, rbx, &use_fp_on_smis);
-      break;
-
-    case Token::MOD:
-      // SmiMod will not accept left in rdx or right in rax.
-      __ movq(rbx, rax);
-      __ movq(rcx, rdx);
-      __ SmiMod(rax, rcx, rbx, &use_fp_on_smis);
-      break;
-
-    case Token::BIT_OR: {
-      ASSERT(right.is(rax));
-      __ SmiOrIfSmis(right, right, left, &not_smis);  // BIT_OR is commutative.
-      break;
-    }
-    case Token::BIT_XOR:
-      ASSERT(right.is(rax));
-      __ SmiXor(right, right, left);  // BIT_XOR is commutative.
-      break;
-
-    case Token::BIT_AND:
-      ASSERT(right.is(rax));
-      __ SmiAnd(right, right, left);  // BIT_AND is commutative.
-      break;
-
-    case Token::SHL:
-      __ SmiShiftLeft(left, left, right);
-      __ movq(rax, left);
-      break;
-
-    case Token::SAR:
-      __ SmiShiftArithmeticRight(left, left, right);
-      __ movq(rax, left);
-      break;
-
-    case Token::SHR:
-      __ SmiShiftLogicalRight(left, left, right, &use_fp_on_smis);
-      __ movq(rax, left);
-      break;
-
-    default:
-      UNREACHABLE();
-  }
-
-  // 5. Emit return of result in rax. Some operations have registers pushed.
-  __ ret(0);
-
-  if (use_fp_on_smis.is_linked()) {
-    // 6. For some operations emit inline code to perform floating point
-    // operations on known smis (e.g., if the result of the operation
-    // overflowed the smi range).
-    __ bind(&use_fp_on_smis);
-    if (op == Token::DIV || op == Token::MOD) {
-      // Restore left and right to rdx and rax.
-      __ movq(rdx, rcx);
-      __ movq(rax, rbx);
-    }
-
-    if (generate_inline_heapnumber_results) {
-      __ AllocateHeapNumber(rcx, rbx, slow);
-      Comment perform_float(masm, "-- Perform float operation on smis");
-      if (op == Token::SHR) {
-        __ SmiToInteger32(left, left);
-        __ cvtqsi2sd(xmm0, left);
-      } else {
-        FloatingPointHelper::LoadSSE2SmiOperands(masm);
-        switch (op) {
-          case Token::ADD: __ addsd(xmm0, xmm1); break;
-          case Token::SUB: __ subsd(xmm0, xmm1); break;
-          case Token::MUL: __ mulsd(xmm0, xmm1); break;
-          case Token::DIV: __ divsd(xmm0, xmm1); break;
-          default: UNREACHABLE();
-        }
-      }
-      __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0);
-      __ movq(rax, rcx);
-      __ ret(0);
-    } else {
-      __ jmp(&fail);
-    }
-  }
-
-  // 7. Non-smi operands reach the end of the code generated by
-  // GenerateSmiCode, and fall through to subsequent code,
-  // with the operands in rdx and rax.
-  // But first we check if non-smi values are HeapNumbers holding
-  // values that could be smi.
-  __ bind(&not_smis);
-  Comment done_comment(masm, "-- Enter non-smi code");
-  FloatingPointHelper::ConvertUndefined convert_undefined =
-      FloatingPointHelper::BAILOUT_ON_UNDEFINED;
-  // This list must be in sync with BinaryOpPatch() behavior in ic.cc.
-  if (op == Token::BIT_AND ||
-      op == Token::BIT_OR ||
-      op == Token::BIT_XOR ||
-      op == Token::SAR ||
-      op == Token::SHL ||
-      op == Token::SHR) {
-    convert_undefined = FloatingPointHelper::CONVERT_UNDEFINED_TO_ZERO;
-  }
-  FloatingPointHelper::NumbersToSmis(masm, left, right, rbx, rdi, rcx,
-                                     &smi_values, &fail, convert_undefined);
-  __ jmp(&smi_values);
-  __ bind(&fail);
-}
-
-
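The deleted fast path above leans on the x64 smi layout: with kSmiValueSize == 32 (asserted later in this file), the 32-bit payload lives in the upper half of the 64-bit word, so an ordinary 64-bit add both performs the operation and detects payload overflow in one step. A hedged C++ model of SmiAdd's overflow contract (GCC/Clang's __builtin_add_overflow assumed; not V8 code):

#include <cstdint>
#include <optional>

// Hedged model, not V8 code: a smi keeps its 32-bit payload in the upper
// half of a 64-bit word, so the low 32 bits are always zero.
typedef int64_t Smi;

inline Smi SmiFromInt32(int32_t v) { return static_cast<int64_t>(v) << 32; }
inline int32_t SmiToInt32(Smi s) { return static_cast<int32_t>(s >> 32); }

// Mirrors the SmiAdd fast path: add the tagged words directly; a signed
// 64-bit overflow is exactly a 32-bit overflow of the payloads, which is
// when the stub jumps to use_fp_on_smis and produces a HeapNumber instead.
std::optional<Smi> SmiAdd(Smi left, Smi right) {
  int64_t result;
  if (__builtin_add_overflow(left, right, &result)) {  // GCC/Clang builtin
    return std::nullopt;  // overflow: fall back to a double result
  }
  return result;  // still a valid smi: low 32 bits remain zero
}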
-static void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
-                                                      Label* alloc_failure,
-                                                      OverwriteMode mode);
-
-
-static void BinaryOpStub_GenerateFloatingPointCode(MacroAssembler* masm,
-                                                   Label* allocation_failure,
-                                                   Label* non_numeric_failure,
-                                                   Token::Value op,
-                                                   OverwriteMode mode) {
-  switch (op) {
-    case Token::ADD:
-    case Token::SUB:
-    case Token::MUL:
-    case Token::DIV: {
-      FloatingPointHelper::LoadSSE2UnknownOperands(masm, non_numeric_failure);
-
-      switch (op) {
-        case Token::ADD: __ addsd(xmm0, xmm1); break;
-        case Token::SUB: __ subsd(xmm0, xmm1); break;
-        case Token::MUL: __ mulsd(xmm0, xmm1); break;
-        case Token::DIV: __ divsd(xmm0, xmm1); break;
-        default: UNREACHABLE();
-      }
-      BinaryOpStub_GenerateHeapResultAllocation(
-          masm, allocation_failure, mode);
-      __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
-      __ ret(0);
-      break;
-    }
-    case Token::MOD: {
-      // For MOD we jump to the allocation_failure label, to call runtime.
-      __ jmp(allocation_failure);
-      break;
-    }
-    case Token::BIT_OR:
-    case Token::BIT_AND:
-    case Token::BIT_XOR:
-    case Token::SAR:
-    case Token::SHL:
-    case Token::SHR: {
-      Label non_smi_shr_result;
-      Register heap_number_map = r9;
-      __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-      FloatingPointHelper::LoadAsIntegers(masm, non_numeric_failure,
-                                          heap_number_map);
-      switch (op) {
-        case Token::BIT_OR: __ orl(rax, rcx); break;
-        case Token::BIT_AND: __ andl(rax, rcx); break;
-        case Token::BIT_XOR: __ xorl(rax, rcx); break;
-        case Token::SAR: __ sarl_cl(rax); break;
-        case Token::SHL: __ shll_cl(rax); break;
-        case Token::SHR: {
-          __ shrl_cl(rax);
-          // Check if result is negative. This can only happen for a shift
-          // by zero.
-          __ testl(rax, rax);
-          __ j(negative, &non_smi_shr_result);
-          break;
-        }
-        default: UNREACHABLE();
-      }
-      STATIC_ASSERT(kSmiValueSize == 32);
-      // Tag smi result and return.
-      __ Integer32ToSmi(rax, rax);
-      __ Ret();
-
-      // Logical shift right can produce an unsigned int32 that is not
-      // an int32, and so is not in the smi range. Allocate a heap number
-      // in that case.
-      if (op == Token::SHR) {
-        __ bind(&non_smi_shr_result);
-        Label allocation_failed;
-        __ movl(rbx, rax);  // rbx holds result value (uint32 value as int64).
-        // Allocate heap number in new space.
-        // Not using AllocateHeapNumber macro in order to reuse
-        // already loaded heap_number_map.
-        __ Allocate(HeapNumber::kSize, rax, rdx, no_reg, &allocation_failed,
-                    TAG_OBJECT);
-        // Set the map.
-        __ AssertRootValue(heap_number_map,
-                           Heap::kHeapNumberMapRootIndex,
-                           kHeapNumberMapRegisterClobbered);
-        __ movq(FieldOperand(rax, HeapObject::kMapOffset),
-                heap_number_map);
-        __ cvtqsi2sd(xmm0, rbx);
-        __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
-        __ Ret();
-
-        __ bind(&allocation_failed);
-        // We need tagged values in rdx and rax for the following code,
-        // not int32 in rax and rcx.
-        __ Integer32ToSmi(rax, rcx);
-        __ Integer32ToSmi(rdx, rbx);
-        __ jmp(allocation_failure);
-      }
-      break;
-    }
-    default: UNREACHABLE(); break;
-  }
-  // No fall-through from this generated code.
-  if (FLAG_debug_code) {
-    __ Abort(kUnexpectedFallThroughInBinaryStubGenerateFloatingPointCode);
-  }
-}
-
-
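The SHR case above is the one bitwise operator that can escape the smi range: JavaScript's >>> returns a uint32, and only a shift by zero can leave the sign bit set. A hedged model of that check (illustrative, not V8 code):

#include <cstdint>
#include <optional>

// For shift amounts 1..31 the top bit is zero and the result fits an int32
// (and hence a 32-bit smi), but a shift by zero can leave the sign bit set;
// such a value is outside the smi range and needs a HeapNumber (double).
std::optional<int32_t> ShrToSmi(uint32_t value, uint32_t shift) {
  uint32_t result = value >> (shift & 31);  // JS masks the shift count
  if (static_cast<int32_t>(result) < 0) {
    return std::nullopt;  // e.g. 0x80000000u >> 0: needs a double
  }
  return static_cast<int32_t>(result);
}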
-static void BinaryOpStub_GenerateRegisterArgsPushUnderReturn(
-    MacroAssembler* masm) {
-  // Push arguments, but ensure they are under the return address
-  // for a tail call.
-  __ PopReturnAddressTo(rcx);
-  __ push(rdx);
-  __ push(rax);
-  __ PushReturnAddressFrom(rcx);
-}
-
-
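The shuffle above exists because these stubs tail-call: the callee's own ret must return to the original caller, so the arguments have to end up under the return address. A toy model using a vector as the stack (back() plays the role of the stack top; not real V8 code):

#include <cstdint>
#include <vector>

void PushArgsUnderReturn(std::vector<uint64_t>* stack,
                         uint64_t rdx_value, uint64_t rax_value) {
  uint64_t return_address = stack->back();  // __ PopReturnAddressTo(rcx)
  stack->pop_back();
  stack->push_back(rdx_value);              // __ push(rdx)
  stack->push_back(rax_value);              // __ push(rax)
  stack->push_back(return_address);         // __ PushReturnAddressFrom(rcx)
}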
-void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
-  ASSERT(op_ == Token::ADD);
-  Label left_not_string, call_runtime;
-
-  // Registers containing left and right operands respectively.
-  Register left = rdx;
-  Register right = rax;
-
-  // Test if left operand is a string.
-  __ JumpIfSmi(left, &left_not_string, Label::kNear);
-  __ CmpObjectType(left, FIRST_NONSTRING_TYPE, rcx);
-  __ j(above_equal, &left_not_string, Label::kNear);
-  StringAddStub string_add_left_stub(
-      (StringAddFlags)(STRING_ADD_CHECK_RIGHT | STRING_ADD_ERECT_FRAME));
-  BinaryOpStub_GenerateRegisterArgsPushUnderReturn(masm);
-  __ TailCallStub(&string_add_left_stub);
-
-  // Left operand is not a string, test right.
-  __ bind(&left_not_string);
-  __ JumpIfSmi(right, &call_runtime, Label::kNear);
-  __ CmpObjectType(right, FIRST_NONSTRING_TYPE, rcx);
-  __ j(above_equal, &call_runtime, Label::kNear);
-
-  StringAddStub string_add_right_stub(
-      (StringAddFlags)(STRING_ADD_CHECK_LEFT | STRING_ADD_ERECT_FRAME));
-  BinaryOpStub_GenerateRegisterArgsPushUnderReturn(masm);
-  __ TailCallStub(&string_add_right_stub);
-
-  // Neither argument is a string.
-  __ bind(&call_runtime);
-}
-
-
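The control flow above checks only the operand not already proven to be a string, then tail-calls the matching StringAddStub variant. A hedged summary of the dispatch (AddKind and ClassifyAdd are illustrative names, not V8 API):

enum class AddKind { kStringAddCheckRight, kStringAddCheckLeft, kRuntime };

AddKind ClassifyAdd(bool left_is_string, bool right_is_string) {
  if (left_is_string) return AddKind::kStringAddCheckRight;
  if (right_is_string) return AddKind::kStringAddCheckLeft;
  return AddKind::kRuntime;  // neither operand is a string
}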
-void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
-  Label right_arg_changed, call_runtime;
-
-  if (op_ == Token::MOD && encoded_right_arg_.has_value) {
-    // It is guaranteed that the value will fit into a Smi, because if it
-    // didn't, we wouldn't be here, see BinaryOp_Patch.
-    __ Cmp(rax, Smi::FromInt(fixed_right_arg_value()));
-    __ j(not_equal, &right_arg_changed);
-  }
-
-  if (result_type_ == BinaryOpIC::UNINITIALIZED ||
-      result_type_ == BinaryOpIC::SMI) {
-    // Only allow smi results.
-    BinaryOpStub_GenerateSmiCode(masm, NULL, NO_HEAPNUMBER_RESULTS, op_);
-  } else {
-    // Allow heap number result and don't make a transition if a heap number
-    // cannot be allocated.
-    BinaryOpStub_GenerateSmiCode(
-        masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS, op_);
-  }
-
-  // Code falls through if the result is not returned as either a smi or heap
-  // number.
-  __ bind(&right_arg_changed);
-  GenerateTypeTransition(masm);
-
-  if (call_runtime.is_linked()) {
-    __ bind(&call_runtime);
-    {
-      FrameScope scope(masm, StackFrame::INTERNAL);
-      GenerateRegisterArgsPush(masm);
-      GenerateCallRuntime(masm);
-    }
-    __ Ret();
-  }
-}
-
-
-void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
-  // The int32 case is identical to the Smi case. We avoid creating this
-  // ic state on x64.
-  UNREACHABLE();
-}
-
-
-void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
-  Label call_runtime;
-  ASSERT(left_type_ == BinaryOpIC::STRING && right_type_ == BinaryOpIC::STRING);
-  ASSERT(op_ == Token::ADD);
-  // If both arguments are strings, call the string add stub.
-  // Otherwise, do a transition.
-
-  // Registers containing left and right operands respectively.
-  Register left = rdx;
-  Register right = rax;
-
-  // Test if left operand is a string.
-  __ JumpIfSmi(left, &call_runtime);
-  __ CmpObjectType(left, FIRST_NONSTRING_TYPE, rcx);
-  __ j(above_equal, &call_runtime);
-
-  // Test if right operand is a string.
-  __ JumpIfSmi(right, &call_runtime);
-  __ CmpObjectType(right, FIRST_NONSTRING_TYPE, rcx);
-  __ j(above_equal, &call_runtime);
-
-  StringAddStub string_add_stub(
-      (StringAddFlags)(STRING_ADD_CHECK_NONE | STRING_ADD_ERECT_FRAME));
-  BinaryOpStub_GenerateRegisterArgsPushUnderReturn(masm);
-  __ TailCallStub(&string_add_stub);
-
-  __ bind(&call_runtime);
-  GenerateTypeTransition(masm);
-}
-
-
-void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
-  Label call_runtime;
-
-  if (op_ == Token::ADD) {
-    // Handle string addition here, because it is the only operation
-    // that does not do a ToNumber conversion on the operands.
-    GenerateAddStrings(masm);
-  }
-
-  // Convert oddball arguments to numbers.
-  Label check, done;
-  __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
-  __ j(not_equal, &check, Label::kNear);
-  if (Token::IsBitOp(op_)) {
-    __ xor_(rdx, rdx);
-  } else {
-    __ LoadRoot(rdx, Heap::kNanValueRootIndex);
-  }
-  __ jmp(&done, Label::kNear);
-  __ bind(&check);
-  __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
-  __ j(not_equal, &done, Label::kNear);
-  if (Token::IsBitOp(op_)) {
-    __ xor_(rax, rax);
-  } else {
-    __ LoadRoot(rax, Heap::kNanValueRootIndex);
-  }
-  __ bind(&done);
-
-  GenerateNumberStub(masm);
-}
-
-
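The deleted oddball stub patches undefined operands in place before falling through to GenerateNumberStub: zero for bitwise operators, NaN otherwise, matching ToNumber(undefined). A one-line hedged model:

#include <limits>

// Hedged model of the oddball fix-up above: an undefined operand becomes 0
// for bitwise ops (whose result is an int32 anyway) and NaN for arithmetic.
double ConvertUndefinedOperand(bool is_bit_op) {
  return is_bit_op ? 0.0 : std::numeric_limits<double>::quiet_NaN();
}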
-static void BinaryOpStub_CheckSmiInput(MacroAssembler* masm,
-                                       Register input,
-                                       Label* fail) {
-  Label ok;
-  __ JumpIfSmi(input, &ok, Label::kNear);
-  Register heap_number_map = r8;
-  Register scratch1 = r9;
-  Register scratch2 = r10;
-  // HeapNumbers containing 32bit integer values are also allowed.
-  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-  __ cmpq(FieldOperand(input, HeapObject::kMapOffset), heap_number_map);
-  __ j(not_equal, fail);
-  __ movsd(xmm0, FieldOperand(input, HeapNumber::kValueOffset));
-  // Convert, convert back, and compare the two doubles' bits.
-  __ cvttsd2siq(scratch2, xmm0);
-  __ Cvtlsi2sd(xmm1, scratch2);
-  __ movq(scratch1, xmm0);
-  __ movq(scratch2, xmm1);
-  __ cmpq(scratch1, scratch2);
-  __ j(not_equal, fail);
-  __ bind(&ok);
-}
-
-
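BinaryOpStub_CheckSmiInput accepts a HeapNumber as "smi input" only when the convert/convert-back round trip reproduces the exact double bits, which also rejects -0.0 (its round trip yields +0.0 with different bits). A hedged C++ rendering; for values far outside int range the C++ cast is undefined where cvttsd2siq produces a sentinel, so treat this strictly as a model:

#include <cstdint>
#include <cstring>

bool HoldsLosslessInt32(double value) {
  int64_t truncated = static_cast<int64_t>(value);       // cvttsd2siq
  double back = static_cast<double>(
      static_cast<int32_t>(truncated));                  // Cvtlsi2sd (32-bit)
  return std::memcmp(&back, &value, sizeof value) == 0;  // compare raw bits
}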
-void BinaryOpStub::GenerateNumberStub(MacroAssembler* masm) {
-  Label gc_required, not_number;
-
-  // It could be that only SMIs have been seen at either the left
-  // or the right operand. For precise type feedback, patch the IC
-  // again if this changes.
-  if (left_type_ == BinaryOpIC::SMI) {
-    BinaryOpStub_CheckSmiInput(masm, rdx, &not_number);
-  }
-  if (right_type_ == BinaryOpIC::SMI) {
-    BinaryOpStub_CheckSmiInput(masm, rax, &not_number);
-  }
-
-  BinaryOpStub_GenerateFloatingPointCode(
-      masm, &gc_required, &not_number, op_, mode_);
-
-  __ bind(&not_number);
-  GenerateTypeTransition(masm);
-
-  __ bind(&gc_required);
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-    GenerateRegisterArgsPush(masm);
-    GenerateCallRuntime(masm);
-  }
-  __ Ret();
-}
-
-
-void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
-  Label call_runtime, call_string_add_or_runtime;
-
-  BinaryOpStub_GenerateSmiCode(
-      masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS, op_);
-
-  BinaryOpStub_GenerateFloatingPointCode(
-      masm, &call_runtime, &call_string_add_or_runtime, op_, mode_);
-
-  __ bind(&call_string_add_or_runtime);
-  if (op_ == Token::ADD) {
-    GenerateAddStrings(masm);
-  }
-
-  __ bind(&call_runtime);
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-    GenerateRegisterArgsPush(masm);
-    GenerateCallRuntime(masm);
-  }
-  __ Ret();
-}
-
-
-static void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
-                                                      Label* alloc_failure,
-                                                      OverwriteMode mode) {
-  Label skip_allocation;
-  switch (mode) {
-    case OVERWRITE_LEFT: {
-      // If the argument in rdx is already an object, we skip the
-      // allocation of a heap number.
-      __ JumpIfNotSmi(rdx, &skip_allocation);
-      // Allocate a heap number for the result. Keep rax and rdx intact
-      // for the possible runtime call.
-      __ AllocateHeapNumber(rbx, rcx, alloc_failure);
-      // Now rdx can be overwritten losing one of the arguments as we are
-      // now done and will not need it any more.
-      __ movq(rdx, rbx);
-      __ bind(&skip_allocation);
-      // Use object in rdx as a result holder.
-      __ movq(rax, rdx);
-      break;
-    }
-    case OVERWRITE_RIGHT:
-      // If the argument in rax is already an object, we skip the
-      // allocation of a heap number.
-      __ JumpIfNotSmi(rax, &skip_allocation);
-      // Fall through!
-    case NO_OVERWRITE:
-      // Allocate a heap number for the result. Keep rax and rdx intact
-      // for the possible runtime call.
-      __ AllocateHeapNumber(rbx, rcx, alloc_failure);
-      // Now rax can be overwritten losing one of the arguments as we are
-      // now done and will not need it any more.
-      __ movq(rax, rbx);
-      __ bind(&skip_allocation);
-      break;
-    default: UNREACHABLE();
-  }
-}
-
-
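The overwrite modes above let the stub reuse a clobberable operand's HeapNumber storage for the result; only smi operands, which own no heap storage, force a fresh allocation. A hedged sketch of the policy (names are illustrative, not V8 API):

enum OverwriteMode { NO_OVERWRITE, OVERWRITE_LEFT, OVERWRITE_RIGHT };

bool NeedsFreshHeapNumber(OverwriteMode mode,
                          bool left_is_heap_object,
                          bool right_is_heap_object) {
  if (mode == OVERWRITE_LEFT && left_is_heap_object) return false;
  if (mode == OVERWRITE_RIGHT && right_is_heap_object) return false;
  return true;  // NO_OVERWRITE, or the reusable operand was a smi
}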
-void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
-  __ push(rdx);
-  __ push(rax);
-}
-
-
 void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
   // TAGGED case:
   //   Input:
   //     rsp[8] : argument (should be number).
   //     rsp[0] : return address.
   //   Output:
   //     rax: tagged double result.
   // UNTAGGED case:
   //   Input:
   //     rsp[0] : return address.
(...skipping 286 matching lines...)
     __ bind(&done);
   } else {
     ASSERT(type == TranscendentalCache::LOG);
     __ fldln2();
     __ fxch();
     __ fyl2x();
   }
 }
 
 
-// Input: rdx, rax are the left and right objects of a bit op.
-// Output: rax, rcx are left and right integers for a bit op.
-// Jump to conversion_failure: rdx and rax are unchanged.
-void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
-                                         Label* conversion_failure,
-                                         Register heap_number_map) {
-  // Check float operands.
-  Label arg1_is_object, check_undefined_arg1;
-  Label arg2_is_object, check_undefined_arg2;
-  Label load_arg2, done;
-
-  __ JumpIfNotSmi(rdx, &arg1_is_object);
-  __ SmiToInteger32(r8, rdx);
-  __ jmp(&load_arg2);
-
-  // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
-  __ bind(&check_undefined_arg1);
-  __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
-  __ j(not_equal, conversion_failure);
-  __ Set(r8, 0);
-  __ jmp(&load_arg2);
-
-  __ bind(&arg1_is_object);
-  __ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), heap_number_map);
-  __ j(not_equal, &check_undefined_arg1);
-  // Get the untagged integer version of the rdx heap number in r8.
-  __ TruncateHeapNumberToI(r8, rdx);
-
-  // Here r8 has the untagged integer, rax has a Smi or a heap number.
-  __ bind(&load_arg2);
-  // Test if arg2 is a Smi.
-  __ JumpIfNotSmi(rax, &arg2_is_object);
-  __ SmiToInteger32(rcx, rax);
-  __ jmp(&done);
-
-  // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
-  __ bind(&check_undefined_arg2);
-  __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
-  __ j(not_equal, conversion_failure);
-  __ Set(rcx, 0);
-  __ jmp(&done);
-
-  __ bind(&arg2_is_object);
-  __ cmpq(FieldOperand(rax, HeapObject::kMapOffset), heap_number_map);
-  __ j(not_equal, &check_undefined_arg2);
-  // Get the untagged integer version of the rax heap number in rcx.
-  __ TruncateHeapNumberToI(rcx, rax);
-
-  __ bind(&done);
-  __ movl(rax, r8);
-}
-
-
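LoadAsIntegers normalizes each bit-op operand: a smi yields its payload, undefined becomes zero (ECMA-262 section 9.5), and a HeapNumber is truncated to int32. A hedged model with an invented Value struct; the real TruncateHeapNumberToI implements full ToInt32 wrap-around, which the cast below only approximates in range:

#include <cstdint>

struct Value {
  bool is_smi, is_undefined, is_heap_number;
  int32_t smi_payload;
  double number;
};

bool LoadAsInteger(const Value& v, int32_t* out) {
  if (v.is_smi) { *out = v.smi_payload; return true; }
  if (v.is_undefined) { *out = 0; return true; }  // undefined -> 0
  if (v.is_heap_number) {
    *out = static_cast<int32_t>(static_cast<int64_t>(v.number));  // truncate
    return true;
  }
  return false;  // anything else jumps to conversion_failure
}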
-void FloatingPointHelper::LoadSSE2SmiOperands(MacroAssembler* masm) {
-  __ SmiToInteger32(kScratchRegister, rdx);
-  __ Cvtlsi2sd(xmm0, kScratchRegister);
-  __ SmiToInteger32(kScratchRegister, rax);
-  __ Cvtlsi2sd(xmm1, kScratchRegister);
-}
-
-
 void FloatingPointHelper::LoadSSE2UnknownOperands(MacroAssembler* masm,
                                                   Label* not_numbers) {
   Label load_smi_rdx, load_nonsmi_rax, load_smi_rax, load_float_rax, done;
   // Load operand in rdx into xmm0, or branch to not_numbers.
   __ LoadRoot(rcx, Heap::kHeapNumberMapRootIndex);
   __ JumpIfSmi(rdx, &load_smi_rdx);
   __ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), rcx);
   __ j(not_equal, not_numbers);  // Argument in rdx is not a number.
   __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
   // Load operand in rax into xmm1, or branch to not_numbers.
(...skipping 10 matching lines...)
   __ Cvtlsi2sd(xmm0, kScratchRegister);
   __ JumpIfNotSmi(rax, &load_nonsmi_rax);
 
   __ bind(&load_smi_rax);
   __ SmiToInteger32(kScratchRegister, rax);
   __ Cvtlsi2sd(xmm1, kScratchRegister);
   __ bind(&done);
 }
 
 
-void FloatingPointHelper::NumbersToSmis(MacroAssembler* masm,
-                                        Register first,
-                                        Register second,
-                                        Register scratch1,
-                                        Register scratch2,
-                                        Register scratch3,
-                                        Label* on_success,
-                                        Label* on_not_smis,
-                                        ConvertUndefined convert_undefined) {
-  Register heap_number_map = scratch3;
-  Register smi_result = scratch1;
-  Label done, maybe_undefined_first, maybe_undefined_second, first_done;
-
-  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-
-  Label first_smi;
-  __ JumpIfSmi(first, &first_smi, Label::kNear);
-  __ cmpq(FieldOperand(first, HeapObject::kMapOffset), heap_number_map);
-  __ j(not_equal,
-       (convert_undefined == CONVERT_UNDEFINED_TO_ZERO)
-           ? &maybe_undefined_first
-           : on_not_smis);
-  // Convert HeapNumber to smi if possible.
-  __ movsd(xmm0, FieldOperand(first, HeapNumber::kValueOffset));
-  __ movq(scratch2, xmm0);
-  __ cvttsd2siq(smi_result, xmm0);
-  // Check if conversion was successful by converting back and
-  // comparing to the original double's bits.
-  __ Cvtlsi2sd(xmm1, smi_result);
-  __ movq(kScratchRegister, xmm1);
-  __ cmpq(scratch2, kScratchRegister);
-  __ j(not_equal, on_not_smis);
-  __ Integer32ToSmi(first, smi_result);
-
-  __ bind(&first_done);
-  __ JumpIfSmi(second, (on_success != NULL) ? on_success : &done);
-  __ bind(&first_smi);
-  __ AssertNotSmi(second);
-  __ cmpq(FieldOperand(second, HeapObject::kMapOffset), heap_number_map);
-  __ j(not_equal,
-       (convert_undefined == CONVERT_UNDEFINED_TO_ZERO)
-           ? &maybe_undefined_second
-           : on_not_smis);
-  // Convert second to smi, if possible.
-  __ movsd(xmm0, FieldOperand(second, HeapNumber::kValueOffset));
-  __ movq(scratch2, xmm0);
-  __ cvttsd2siq(smi_result, xmm0);
-  __ Cvtlsi2sd(xmm1, smi_result);
-  __ movq(kScratchRegister, xmm1);
-  __ cmpq(scratch2, kScratchRegister);
-  __ j(not_equal, on_not_smis);
-  __ Integer32ToSmi(second, smi_result);
-  if (on_success != NULL) {
-    __ jmp(on_success);
-  } else {
-    __ jmp(&done);
-  }
-
-  __ bind(&maybe_undefined_first);
-  __ CompareRoot(first, Heap::kUndefinedValueRootIndex);
-  __ j(not_equal, on_not_smis);
-  __ xor_(first, first);
-  __ jmp(&first_done);
-
-  __ bind(&maybe_undefined_second);
-  __ CompareRoot(second, Heap::kUndefinedValueRootIndex);
-  __ j(not_equal, on_not_smis);
-  __ xor_(second, second);
-  if (on_success != NULL) {
-    __ jmp(on_success);
-  }
-  // Else: fall through.
-
-  __ bind(&done);
-}
-
-
 void MathPowStub::Generate(MacroAssembler* masm) {
   const Register exponent = rdx;
   const Register base = rax;
   const Register scratch = rcx;
   const XMMRegister double_result = xmm3;
   const XMMRegister double_base = xmm2;
   const XMMRegister double_exponent = xmm1;
   const XMMRegister double_scratch = xmm4;
 
   Label call_runtime, done, exponent_not_smi, int_exponent;
(...skipping 1847 matching lines...)
 
 
 void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
   CEntryStub::GenerateAheadOfTime(isolate);
   StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
   StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
   // It is important that the store buffer overflow stubs are generated first.
   RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate);
   ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
   CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
+  BinaryOpStub::GenerateAheadOfTime(isolate);
 }
 
 
 void CodeStub::GenerateFPStubs(Isolate* isolate) {
 }
 
 
 void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
   CEntryStub stub(1, kDontSaveFPRegs);
   stub.GetCode(isolate)->set_is_pregenerated(true);
(...skipping 3066 matching lines...)
   __ bind(&fast_elements_case);
   GenerateCase(masm, FAST_ELEMENTS);
 }
 
 
 #undef __
 
 } }  // namespace v8::internal
 
 #endif  // V8_TARGET_ARCH_X64