Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(485)

Side by Side Diff: src/x64/code-stubs-x64.cc

Issue 23618002: Hydrogenisation of binops (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: rebase Created 7 years, 3 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « src/v8-counters.h ('k') | src/x64/full-codegen-x64.cc » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright 2013 the V8 project authors. All rights reserved. 1 // Copyright 2013 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 127 matching lines...) Expand 10 before | Expand all | Expand 10 after
138 Isolate* isolate, 138 Isolate* isolate,
139 CodeStubInterfaceDescriptor* descriptor) { 139 CodeStubInterfaceDescriptor* descriptor) {
140 static Register registers[] = { rax, rbx }; 140 static Register registers[] = { rax, rbx };
141 descriptor->register_param_count_ = 2; 141 descriptor->register_param_count_ = 2;
142 descriptor->register_params_ = registers; 142 descriptor->register_params_ = registers;
143 descriptor->deoptimization_handler_ = 143 descriptor->deoptimization_handler_ =
144 Runtime::FunctionForId(Runtime::kTransitionElementsKind)->entry; 144 Runtime::FunctionForId(Runtime::kTransitionElementsKind)->entry;
145 } 145 }
146 146
147 147
148 void BinaryOpStub::InitializeInterfaceDescriptor(
149 Isolate* isolate,
150 CodeStubInterfaceDescriptor* descriptor) {
151 static Register registers[] = { rdx, rax };
152 descriptor->register_param_count_ = 2;
153 descriptor->register_params_ = registers;
154 descriptor->deoptimization_handler_ = FUNCTION_ADDR(BinaryOpIC_Miss);
155 descriptor->SetMissHandler(
156 ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate));
157 }
158
159
148 static void InitializeArrayConstructorDescriptor( 160 static void InitializeArrayConstructorDescriptor(
149 Isolate* isolate, 161 Isolate* isolate,
150 CodeStubInterfaceDescriptor* descriptor, 162 CodeStubInterfaceDescriptor* descriptor,
151 int constant_stack_parameter_count) { 163 int constant_stack_parameter_count) {
152 // register state 164 // register state
153 // rax -- number of arguments 165 // rax -- number of arguments
154 // rdi -- function 166 // rdi -- function
155 // rbx -- type info cell with elements kind 167 // rbx -- type info cell with elements kind
156 static Register registers[] = { rdi, rbx }; 168 static Register registers[] = { rdi, rbx };
157 descriptor->register_param_count_ = 2; 169 descriptor->register_param_count_ = 2;
(...skipping 272 matching lines...) Expand 10 before | Expand all | Expand 10 after
// Static helpers for loading and converting the binary-op operand
// registers (rdx, rax) into SSE2 double registers, untagged integers,
// or smis.  Used by the BinaryOpStub code generators below.
class FloatingPointHelper : public AllStatic {
 public:
  // How LoadAsIntegers/NumbersToSmis treat an undefined operand.
  enum ConvertUndefined {
    CONVERT_UNDEFINED_TO_ZERO,
    BAILOUT_ON_UNDEFINED
  };
  // Load the operands from rdx and rax into xmm0 and xmm1, as doubles.
  // If the operands are not both numbers, jump to not_numbers.
  // Leaves rdx and rax unchanged. SmiOperands assumes both are smis.
  // NumberOperands assumes both are smis or heap numbers.
  static void LoadSSE2SmiOperands(MacroAssembler* masm);
  static void LoadSSE2UnknownOperands(MacroAssembler* masm,
                                      Label* not_numbers);

  // Takes the operands in rdx and rax and loads them as integers in rax
  // and rcx.
  static void LoadAsIntegers(MacroAssembler* masm,
                             Label* operand_conversion_failure,
                             Register heap_number_map);

  // Tries to convert two values to smis losslessly.
  // This fails if either argument is not a Smi nor a HeapNumber,
  // or if it's a HeapNumber with a value that can't be converted
  // losslessly to a Smi. In that case, control transitions to the
  // on_not_smis label.
  // On success, either control goes to the on_success label (if one is
  // provided), or it falls through at the end of the code (if on_success
  // is NULL).
  // On success, both first and second holds Smi tagged values.
  // One of first or second must be non-Smi when entering.
  static void NumbersToSmis(MacroAssembler* masm,
                            Register first,
                            Register second,
                            Register scratch1,
                            Register scratch2,
                            Register scratch3,
                            Label* on_success,
                            Label* on_not_smis,
                            ConvertUndefined convert_undefined);
};
470 455
471 456
472 void DoubleToIStub::Generate(MacroAssembler* masm) { 457 void DoubleToIStub::Generate(MacroAssembler* masm) {
473 Register input_reg = this->source(); 458 Register input_reg = this->source();
474 Register final_result_reg = this->destination(); 459 Register final_result_reg = this->destination();
475 ASSERT(is_truncating()); 460 ASSERT(is_truncating());
476 461
477 Label check_negative, process_64_bits, done; 462 Label check_negative, process_64_bits, done;
478 463
(...skipping 67 matching lines...) Expand 10 before | Expand all | Expand 10 after
546 if (!final_result_reg.is(result_reg)) { 531 if (!final_result_reg.is(result_reg)) {
547 ASSERT(final_result_reg.is(rcx)); 532 ASSERT(final_result_reg.is(rcx));
548 __ movl(final_result_reg, result_reg); 533 __ movl(final_result_reg, result_reg);
549 } 534 }
550 __ pop(save_reg); 535 __ pop(save_reg);
551 __ pop(scratch1); 536 __ pop(scratch1);
552 __ ret(0); 537 __ ret(0);
553 } 538 }
554 539
555 540
// BinaryOpStub needs no platform-specific initialization on x64.
void BinaryOpStub::Initialize() {}
557
558
// Emitted when the observed operand types no longer match this stub's
// recorded state.  Re-pushes the two operands plus the stub's MinorKey
// under the return address and tail-calls the runtime patching routine,
// which installs a more general stub and returns the operation result.
void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
  // Hold the return address in rcx while rebuilding the argument frame.
  __ PopReturnAddressTo(rcx);
  __ push(rdx);
  __ push(rax);
  // Left and right arguments are now on top.
  __ Push(Smi::FromInt(MinorKey()));

  __ PushReturnAddressFrom(rcx);

  // Patch the caller to an appropriate specialized stub and return the
  // operation result to the caller of the stub.
  __ TailCallExternalReference(
      ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
                        masm->isolate()),
      3,  // Three stack arguments: left, right, MinorKey smi.
      1);
}
576
577
// Generates the fast path for a binary operation on two smi operands
// (left in rdx, right in rax).  On success the smi (or, when allowed,
// heap-number) result is returned in rax via __ ret(0).  If either
// operand is not a smi and cannot be losslessly converted to one, the
// code falls through with tagged operands still in rdx and rax.
// |slow| is taken only when a heap-number result allocation fails.
static void BinaryOpStub_GenerateSmiCode(
    MacroAssembler* masm,
    Label* slow,
    BinaryOpStub::SmiCodeGenerateHeapNumberResults allow_heapnumber_results,
    Token::Value op) {

  // Arguments to BinaryOpStub are in rdx and rax.
  const Register left = rdx;
  const Register right = rax;

  // We only generate heapnumber answers for overflowing calculations
  // for the four basic arithmetic operations and logical right shift by 0.
  bool generate_inline_heapnumber_results =
      (allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS) &&
      (op == Token::ADD || op == Token::SUB ||
       op == Token::MUL || op == Token::DIV || op == Token::SHR);

  // Smi check of both operands.  If op is BIT_OR, the check is delayed
  // until after the OR operation.
  Label not_smis;
  Label use_fp_on_smis;
  Label fail;

  if (op != Token::BIT_OR) {
    Comment smi_check_comment(masm, "-- Smi check arguments");
    __ JumpIfNotBothSmi(left, right, &not_smis);
  }

  // NumbersToSmis (below) jumps back here after converting heap-number
  // operands to smis.
  Label smi_values;
  __ bind(&smi_values);
  // Perform the operation.
  Comment perform_smi(masm, "-- Perform smi operation");
  switch (op) {
    case Token::ADD:
      ASSERT(right.is(rax));
      __ SmiAdd(right, right, left, &use_fp_on_smis);  // ADD is commutative.
      break;

    case Token::SUB:
      __ SmiSub(left, left, right, &use_fp_on_smis);
      __ movq(rax, left);
      break;

    case Token::MUL:
      ASSERT(right.is(rax));
      __ SmiMul(right, right, left, &use_fp_on_smis);  // MUL is commutative.
      break;

    case Token::DIV:
      // SmiDiv will not accept left in rdx or right in rax.
      __ movq(rbx, rax);
      __ movq(rcx, rdx);
      __ SmiDiv(rax, rcx, rbx, &use_fp_on_smis);
      break;

    case Token::MOD:
      // SmiMod will not accept left in rdx or right in rax.
      __ movq(rbx, rax);
      __ movq(rcx, rdx);
      __ SmiMod(rax, rcx, rbx, &use_fp_on_smis);
      break;

    case Token::BIT_OR: {
      ASSERT(right.is(rax));
      // Performs the OR and the delayed smi check in one step.
      __ SmiOrIfSmis(right, right, left, &not_smis);  // BIT_OR is commutative.
      break;
    }
    case Token::BIT_XOR:
      ASSERT(right.is(rax));
      __ SmiXor(right, right, left);  // BIT_XOR is commutative.
      break;

    case Token::BIT_AND:
      ASSERT(right.is(rax));
      __ SmiAnd(right, right, left);  // BIT_AND is commutative.
      break;

    case Token::SHL:
      __ SmiShiftLeft(left, left, right);
      __ movq(rax, left);
      break;

    case Token::SAR:
      __ SmiShiftArithmeticRight(left, left, right);
      __ movq(rax, left);
      break;

    case Token::SHR:
      __ SmiShiftLogicalRight(left, left, right, &use_fp_on_smis);
      __ movq(rax, left);
      break;

    default:
      UNREACHABLE();
  }

  // 5. Emit return of result in rax.  Some operations have registers pushed.
  __ ret(0);

  if (use_fp_on_smis.is_linked()) {
    // 6. For some operations emit inline code to perform floating point
    //    operations on known smis (e.g., if the result of the operation
    //    overflowed the smi range).
    __ bind(&use_fp_on_smis);
    if (op == Token::DIV || op == Token::MOD) {
      // Restore left and right to rdx and rax.
      __ movq(rdx, rcx);
      __ movq(rax, rbx);
    }

    if (generate_inline_heapnumber_results) {
      __ AllocateHeapNumber(rcx, rbx, slow);
      Comment perform_float(masm, "-- Perform float operation on smis");
      if (op == Token::SHR) {
        // SmiShiftLogicalRight already left the raw shifted value in |left|.
        __ SmiToInteger32(left, left);
        __ cvtqsi2sd(xmm0, left);
      } else {
        FloatingPointHelper::LoadSSE2SmiOperands(masm);
        switch (op) {
          case Token::ADD: __ addsd(xmm0, xmm1); break;
          case Token::SUB: __ subsd(xmm0, xmm1); break;
          case Token::MUL: __ mulsd(xmm0, xmm1); break;
          case Token::DIV: __ divsd(xmm0, xmm1); break;
          default: UNREACHABLE();
        }
      }
      __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0);
      __ movq(rax, rcx);
      __ ret(0);
    } else {
      // Heap-number results not allowed: fall through to the caller's
      // non-smi code via the shared fail path below.
      __ jmp(&fail);
    }
  }

  // 7. Non-smi operands reach the end of the code generated by
  //    GenerateSmiCode, and fall through to subsequent code,
  //    with the operands in rdx and rax.
  //    But first we check if non-smi values are HeapNumbers holding
  //    values that could be smi.
  __ bind(&not_smis);
  Comment done_comment(masm, "-- Enter non-smi code");
  FloatingPointHelper::ConvertUndefined convert_undefined =
      FloatingPointHelper::BAILOUT_ON_UNDEFINED;
  // This list must be in sync with BinaryOpPatch() behavior in ic.cc.
  if (op == Token::BIT_AND ||
      op == Token::BIT_OR ||
      op == Token::BIT_XOR ||
      op == Token::SAR ||
      op == Token::SHL ||
      op == Token::SHR) {
    convert_undefined = FloatingPointHelper::CONVERT_UNDEFINED_TO_ZERO;
  }
  FloatingPointHelper::NumbersToSmis(masm, left, right, rbx, rdi, rcx,
                                     &smi_values, &fail, convert_undefined);
  __ jmp(&smi_values);
  __ bind(&fail);
}
735
736
737 static void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
738 Label* alloc_failure,
739 OverwriteMode mode);
740
741
// Generates the heap-number path of a binary operation with operands in
// rdx and rax.  Arithmetic ops compute in SSE2 and return a fresh or
// overwritten heap number; bit ops truncate both operands to int32 first.
// Jumps to |non_numeric_failure| if an operand is not a number, and to
// |allocation_failure| when a result heap number cannot be allocated
// (MOD always takes that path and is handled in the runtime).
static void BinaryOpStub_GenerateFloatingPointCode(MacroAssembler* masm,
                                                   Label* allocation_failure,
                                                   Label* non_numeric_failure,
                                                   Token::Value op,
                                                   OverwriteMode mode) {
  switch (op) {
    case Token::ADD:
    case Token::SUB:
    case Token::MUL:
    case Token::DIV: {
      FloatingPointHelper::LoadSSE2UnknownOperands(masm, non_numeric_failure);

      switch (op) {
        case Token::ADD: __ addsd(xmm0, xmm1); break;
        case Token::SUB: __ subsd(xmm0, xmm1); break;
        case Token::MUL: __ mulsd(xmm0, xmm1); break;
        case Token::DIV: __ divsd(xmm0, xmm1); break;
        default: UNREACHABLE();
      }
      // Leaves the result object in rax (possibly reusing an operand,
      // depending on |mode|).
      BinaryOpStub_GenerateHeapResultAllocation(
          masm, allocation_failure, mode);
      __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
      __ ret(0);
      break;
    }
    case Token::MOD: {
      // For MOD we jump to the allocation_failure label, to call runtime.
      __ jmp(allocation_failure);
      break;
    }
    case Token::BIT_OR:
    case Token::BIT_AND:
    case Token::BIT_XOR:
    case Token::SAR:
    case Token::SHL:
    case Token::SHR: {
      Label non_smi_shr_result;
      Register heap_number_map = r9;
      __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
      // Left operand as int32 ends up in rax, right operand in rcx.
      FloatingPointHelper::LoadAsIntegers(masm, non_numeric_failure,
                                          heap_number_map);
      switch (op) {
        case Token::BIT_OR:  __ orl(rax, rcx); break;
        case Token::BIT_AND: __ andl(rax, rcx); break;
        case Token::BIT_XOR: __ xorl(rax, rcx); break;
        case Token::SAR: __ sarl_cl(rax); break;
        case Token::SHL: __ shll_cl(rax); break;
        case Token::SHR: {
          __ shrl_cl(rax);
          // Check if result is negative. This can only happen for a shift
          // by zero.
          __ testl(rax, rax);
          __ j(negative, &non_smi_shr_result);
          break;
        }
        default: UNREACHABLE();
      }
      // Any int32 fits in a smi on x64, so tagging cannot fail.
      STATIC_ASSERT(kSmiValueSize == 32);
      // Tag smi result and return.
      __ Integer32ToSmi(rax, rax);
      __ Ret();

      // Logical shift right can produce an unsigned int32 that is not
      // an int32, and so is not in the smi range.  Allocate a heap number
      // in that case.
      if (op == Token::SHR) {
        __ bind(&non_smi_shr_result);
        Label allocation_failed;
        __ movl(rbx, rax);  // rbx holds result value (uint32 value as int64).
        // Allocate heap number in new space.
        // Not using AllocateHeapNumber macro in order to reuse
        // already loaded heap_number_map.
        __ Allocate(HeapNumber::kSize, rax, rdx, no_reg, &allocation_failed,
                    TAG_OBJECT);
        // Set the map.
        __ AssertRootValue(heap_number_map,
                           Heap::kHeapNumberMapRootIndex,
                           kHeapNumberMapRegisterClobbered);
        __ movq(FieldOperand(rax, HeapObject::kMapOffset),
                heap_number_map);
        // Convert the zero-extended uint32 in rbx to a double.
        __ cvtqsi2sd(xmm0, rbx);
        __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
        __ Ret();

        __ bind(&allocation_failed);
        // We need tagged values in rdx and rax for the following code,
        // not int32 in rax and rcx.
        __ Integer32ToSmi(rax, rcx);
        __ Integer32ToSmi(rdx, rbx);
        __ jmp(allocation_failure);
      }
      break;
    }
    default: UNREACHABLE(); break;
  }
  // No fall-through from this generated code.
  if (FLAG_debug_code) {
    __ Abort(kUnexpectedFallThroughInBinaryStubGenerateFloatingPointCode);
  }
}
842
843
// Pushes the operands rdx then rax beneath the return address so that a
// subsequent tail call sees them as ordinary stack arguments.
static void BinaryOpStub_GenerateRegisterArgsPushUnderReturn(
    MacroAssembler* masm) {
  // Push arguments, but ensure they are under the return address
  // for a tail call.
  __ PopReturnAddressTo(rcx);
  __ push(rdx);
  __ push(rax);
  __ PushReturnAddressFrom(rcx);
}
853
854
// ADD fast path for string operands: if either operand is a string,
// tail-calls the matching StringAddStub (which checks the other operand).
// Falls through when neither operand is a string.
void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
  ASSERT(op_ == Token::ADD);
  Label left_not_string, call_runtime;

  // Registers containing left and right operands respectively.
  Register left = rdx;
  Register right = rax;

  // Test if left operand is a string.
  __ JumpIfSmi(left, &left_not_string, Label::kNear);
  __ CmpObjectType(left, FIRST_NONSTRING_TYPE, rcx);
  __ j(above_equal, &left_not_string, Label::kNear);
  // Left is a string; the stub still has to type-check the right operand.
  StringAddStub string_add_left_stub(
      (StringAddFlags)(STRING_ADD_CHECK_RIGHT | STRING_ADD_ERECT_FRAME));
  BinaryOpStub_GenerateRegisterArgsPushUnderReturn(masm);
  __ TailCallStub(&string_add_left_stub);

  // Left operand is not a string, test right.
  __ bind(&left_not_string);
  __ JumpIfSmi(right, &call_runtime, Label::kNear);
  __ CmpObjectType(right, FIRST_NONSTRING_TYPE, rcx);
  __ j(above_equal, &call_runtime, Label::kNear);

  // Right is a string; the stub still has to type-check the left operand.
  StringAddStub string_add_right_stub(
      (StringAddFlags)(STRING_ADD_CHECK_LEFT | STRING_ADD_ERECT_FRAME));
  BinaryOpStub_GenerateRegisterArgsPushUnderReturn(masm);
  __ TailCallStub(&string_add_right_stub);

  // Neither argument is a string.  Fall through to the caller's code.
  __ bind(&call_runtime);
}
886
887
// Specialized stub body for the SMI state: handles smi operands inline
// and transitions the IC (GenerateTypeTransition) when the operands or
// the fixed right argument no longer match what was recorded.
void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
  Label right_arg_changed, call_runtime;

  if (op_ == Token::MOD && encoded_right_arg_.has_value) {
    // It is guaranteed that the value will fit into a Smi, because if it
    // didn't, we wouldn't be here, see BinaryOp_Patch.
    __ Cmp(rax, Smi::FromInt(fixed_right_arg_value()));
    __ j(not_equal, &right_arg_changed);
  }

  if (result_type_ == BinaryOpIC::UNINITIALIZED ||
      result_type_ == BinaryOpIC::SMI) {
    // Only allow smi results.
    BinaryOpStub_GenerateSmiCode(masm, NULL, NO_HEAPNUMBER_RESULTS, op_);
  } else {
    // Allow heap number result and don't make a transition if a heap number
    // cannot be allocated.
    BinaryOpStub_GenerateSmiCode(
        masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS, op_);
  }

  // Code falls through if the result is not returned as either a smi or heap
  // number.
  __ bind(&right_arg_changed);
  GenerateTypeTransition(masm);

  // call_runtime is only linked when heap-number results were allowed
  // and allocation may fail.
  if (call_runtime.is_linked()) {
    __ bind(&call_runtime);
    {
      FrameScope scope(masm, StackFrame::INTERNAL);
      GenerateRegisterArgsPush(masm);
      GenerateCallRuntime(masm);
    }
    __ Ret();
  }
}
924
925
926 void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
927 // The int32 case is identical to the Smi case. We avoid creating this
928 // ic state on x64.
929 UNREACHABLE();
930 }
931
932
// Specialized ADD stub for the STRING/STRING state: verifies both
// operands are still strings, then tail-calls the string-add stub;
// otherwise transitions the IC to a more general state.
void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
  Label call_runtime;
  ASSERT(left_type_ == BinaryOpIC::STRING && right_type_ == BinaryOpIC::STRING);
  ASSERT(op_ == Token::ADD);
  // If both arguments are strings, call the string add stub.
  // Otherwise, do a transition.

  // Registers containing left and right operands respectively.
  Register left = rdx;
  Register right = rax;

  // Test if left operand is a string.
  __ JumpIfSmi(left, &call_runtime);
  __ CmpObjectType(left, FIRST_NONSTRING_TYPE, rcx);
  __ j(above_equal, &call_runtime);

  // Test if right operand is a string.
  __ JumpIfSmi(right, &call_runtime);
  __ CmpObjectType(right, FIRST_NONSTRING_TYPE, rcx);
  __ j(above_equal, &call_runtime);

  // Both checked; the add stub needs no further type checks.
  StringAddStub string_add_stub(
      (StringAddFlags)(STRING_ADD_CHECK_NONE | STRING_ADD_ERECT_FRAME));
  BinaryOpStub_GenerateRegisterArgsPushUnderReturn(masm);
  __ TailCallStub(&string_add_stub);

  __ bind(&call_runtime);
  GenerateTypeTransition(masm);
}
962
963
// Stub body for operands that may be oddballs (undefined): replaces an
// undefined operand with 0 (bit ops) or NaN (arithmetic), then continues
// as the number stub.  For ADD, string concatenation is tried first.
void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
  Label call_runtime;

  if (op_ == Token::ADD) {
    // Handle string addition here, because it is the only operation
    // that does not do a ToNumber conversion on the operands.
    GenerateAddStrings(masm);
  }

  // Convert oddball arguments to numbers.
  Label check, done;
  __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
  __ j(not_equal, &check, Label::kNear);
  if (Token::IsBitOp(op_)) {
    // Bit ops treat undefined as 0 (via ToInt32).
    __ xor_(rdx, rdx);
  } else {
    // Arithmetic ops treat undefined as NaN (via ToNumber).
    __ LoadRoot(rdx, Heap::kNanValueRootIndex);
  }
  __ jmp(&done, Label::kNear);
  __ bind(&check);
  __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
  __ j(not_equal, &done, Label::kNear);
  if (Token::IsBitOp(op_)) {
    __ xor_(rax, rax);
  } else {
    __ LoadRoot(rax, Heap::kNanValueRootIndex);
  }
  __ bind(&done);

  GenerateNumberStub(masm);
}
995
996
997 static void BinaryOpStub_CheckSmiInput(MacroAssembler* masm,
998 Register input,
999 Label* fail) {
1000 Label ok;
1001 __ JumpIfSmi(input, &ok, Label::kNear);
1002 Register heap_number_map = r8;
1003 Register scratch1 = r9;
1004 Register scratch2 = r10;
1005 // HeapNumbers containing 32bit integer values are also allowed.
1006 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
1007 __ cmpq(FieldOperand(input, HeapObject::kMapOffset), heap_number_map);
1008 __ j(not_equal, fail);
1009 __ movsd(xmm0, FieldOperand(input, HeapNumber::kValueOffset));
1010 // Convert, convert back, and compare the two doubles' bits.
1011 __ cvttsd2siq(scratch2, xmm0);
1012 __ Cvtlsi2sd(xmm1, scratch2);
1013 __ movq(scratch1, xmm0);
1014 __ movq(scratch2, xmm1);
1015 __ cmpq(scratch1, scratch2);
1016 __ j(not_equal, fail);
1017 __ bind(&ok);
1018 }
1019
1020
// Stub body for the NUMBER state: performs the operation on heap-number
// (or smi) operands.  Operands whose recorded type is SMI are re-checked
// so that more precise feedback triggers a new transition.
void BinaryOpStub::GenerateNumberStub(MacroAssembler* masm) {
  Label gc_required, not_number;

  // It could be that only SMIs have been seen at either the left
  // or the right operand. For precise type feedback, patch the IC
  // again if this changes.
  if (left_type_ == BinaryOpIC::SMI) {
    BinaryOpStub_CheckSmiInput(masm, rdx, &not_number);
  }
  if (right_type_ == BinaryOpIC::SMI) {
    BinaryOpStub_CheckSmiInput(masm, rax, &not_number);
  }

  BinaryOpStub_GenerateFloatingPointCode(
      masm, &gc_required, &not_number, op_, mode_);

  // A non-number operand means the recorded feedback is stale.
  __ bind(&not_number);
  GenerateTypeTransition(masm);

  // Result allocation failed: perform the operation in the runtime.
  __ bind(&gc_required);
  {
    FrameScope scope(masm, StackFrame::INTERNAL);
    GenerateRegisterArgsPush(masm);
    GenerateCallRuntime(masm);
  }
  __ Ret();
}
1048
1049
// Fully generic stub body: tries the smi fast path, then the
// heap-number path, then (for ADD) string concatenation, and finally
// falls back to the runtime.  Never transitions further.
void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
  Label call_runtime, call_string_add_or_runtime;

  BinaryOpStub_GenerateSmiCode(
      masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS, op_);

  BinaryOpStub_GenerateFloatingPointCode(
      masm, &call_runtime, &call_string_add_or_runtime, op_, mode_);

  __ bind(&call_string_add_or_runtime);
  if (op_ == Token::ADD) {
    // GenerateAddStrings falls through when neither operand is a string.
    GenerateAddStrings(masm);
  }

  __ bind(&call_runtime);
  {
    FrameScope scope(masm, StackFrame::INTERNAL);
    GenerateRegisterArgsPush(masm);
    GenerateCallRuntime(masm);
  }
  __ Ret();
}
1072
1073
// Produces, in rax, a heap number to hold the result: either a freshly
// allocated one or, depending on |mode|, one of the operand heap numbers
// (rdx or rax) overwritten in place.  rax and rdx stay intact for a
// possible runtime call; jumps to |alloc_failure| if allocation fails.
static void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
                                                      Label* alloc_failure,
                                                      OverwriteMode mode) {
  Label skip_allocation;
  switch (mode) {
    case OVERWRITE_LEFT: {
      // If the argument in rdx is already an object, we skip the
      // allocation of a heap number.
      __ JumpIfNotSmi(rdx, &skip_allocation);
      // Allocate a heap number for the result. Keep rax and rdx intact
      // for the possible runtime call.
      __ AllocateHeapNumber(rbx, rcx, alloc_failure);
      // Now rdx can be overwritten losing one of the arguments as we are
      // now done and will not need it any more.
      __ movq(rdx, rbx);
      __ bind(&skip_allocation);
      // Use object in rdx as a result holder
      __ movq(rax, rdx);
      break;
    }
    case OVERWRITE_RIGHT:
      // If the argument in rax is already an object, we skip the
      // allocation of a heap number.
      __ JumpIfNotSmi(rax, &skip_allocation);
      // Fall through!
    case NO_OVERWRITE:
      // Allocate a heap number for the result. Keep rax and rdx intact
      // for the possible runtime call.
      __ AllocateHeapNumber(rbx, rcx, alloc_failure);
      // Now rax can be overwritten losing one of the arguments as we are
      // now done and will not need it any more.
      __ movq(rax, rbx);
      __ bind(&skip_allocation);
      break;
    default: UNREACHABLE();
  }
}
1111
1112
// Pushes the left (rdx) then right (rax) operand onto the stack, the
// argument order expected by the runtime binary-op entry points.
void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
  __ push(rdx);
  __ push(rax);
}
1117
1118
1119 void TranscendentalCacheStub::Generate(MacroAssembler* masm) { 541 void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
1120 // TAGGED case: 542 // TAGGED case:
1121 // Input: 543 // Input:
1122 // rsp[8] : argument (should be number). 544 // rsp[8] : argument (should be number).
1123 // rsp[0] : return address. 545 // rsp[0] : return address.
1124 // Output: 546 // Output:
1125 // rax: tagged double result. 547 // rax: tagged double result.
1126 // UNTAGGED case: 548 // UNTAGGED case:
1127 // Input:: 549 // Input::
1128 // rsp[0] : return address. 550 // rsp[0] : return address.
(...skipping 286 matching lines...) Expand 10 before | Expand all | Expand 10 after
1415 __ bind(&done); 837 __ bind(&done);
1416 } else { 838 } else {
1417 ASSERT(type == TranscendentalCache::LOG); 839 ASSERT(type == TranscendentalCache::LOG);
1418 __ fldln2(); 840 __ fldln2();
1419 __ fxch(); 841 __ fxch();
1420 __ fyl2x(); 842 __ fyl2x();
1421 } 843 }
1422 } 844 }
1423 845
1424 846
// Input: rdx, rax are the left and right objects of a bit op.
// Output: rax, rcx are left and right integers for a bit op.
// Jump to conversion_failure: rdx and rax are unchanged.
// Smis are untagged directly; heap numbers are truncated to int32;
// undefined converts to zero.  r8 holds the left integer until the end.
void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
                                         Label* conversion_failure,
                                         Register heap_number_map) {
  // Check float operands.
  Label arg1_is_object, check_undefined_arg1;
  Label arg2_is_object, check_undefined_arg2;
  Label load_arg2, done;

  __ JumpIfNotSmi(rdx, &arg1_is_object);
  __ SmiToInteger32(r8, rdx);
  __ jmp(&load_arg2);

  // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
  __ bind(&check_undefined_arg1);
  __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
  __ j(not_equal, conversion_failure);
  __ Set(r8, 0);
  __ jmp(&load_arg2);

  __ bind(&arg1_is_object);
  __ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), heap_number_map);
  __ j(not_equal, &check_undefined_arg1);
  // Get the untagged integer version of the rdx heap number in r8.
  __ TruncateHeapNumberToI(r8, rdx);

  // Here r8 has the untagged integer, rax has a Smi or a heap number.
  __ bind(&load_arg2);
  // Test if arg2 is a Smi.
  __ JumpIfNotSmi(rax, &arg2_is_object);
  __ SmiToInteger32(rcx, rax);
  __ jmp(&done);

  // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
  __ bind(&check_undefined_arg2);
  __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
  __ j(not_equal, conversion_failure);
  __ Set(rcx, 0);
  __ jmp(&done);

  __ bind(&arg2_is_object);
  __ cmpq(FieldOperand(rax, HeapObject::kMapOffset), heap_number_map);
  __ j(not_equal, &check_undefined_arg2);
  // Get the untagged integer version of the rax heap number in rcx.
  __ TruncateHeapNumberToI(rcx, rax);

  __ bind(&done);
  // Move the left integer into its output register only now, so that a
  // failure above leaves rax (and rdx) unchanged.
  __ movl(rax, r8);
}
1476
1477
// Loads the smi operands rdx and rax into xmm0 and xmm1 as doubles.
// Both operands must be smis; kScratchRegister is clobbered.
void FloatingPointHelper::LoadSSE2SmiOperands(MacroAssembler* masm) {
  __ SmiToInteger32(kScratchRegister, rdx);
  __ Cvtlsi2sd(xmm0, kScratchRegister);
  __ SmiToInteger32(kScratchRegister, rax);
  __ Cvtlsi2sd(xmm1, kScratchRegister);
}
1484
1485
1486 void FloatingPointHelper::LoadSSE2UnknownOperands(MacroAssembler* masm, 847 void FloatingPointHelper::LoadSSE2UnknownOperands(MacroAssembler* masm,
1487 Label* not_numbers) { 848 Label* not_numbers) {
1488 Label load_smi_rdx, load_nonsmi_rax, load_smi_rax, load_float_rax, done; 849 Label load_smi_rdx, load_nonsmi_rax, load_smi_rax, load_float_rax, done;
1489 // Load operand in rdx into xmm0, or branch to not_numbers. 850 // Load operand in rdx into xmm0, or branch to not_numbers.
1490 __ LoadRoot(rcx, Heap::kHeapNumberMapRootIndex); 851 __ LoadRoot(rcx, Heap::kHeapNumberMapRootIndex);
1491 __ JumpIfSmi(rdx, &load_smi_rdx); 852 __ JumpIfSmi(rdx, &load_smi_rdx);
1492 __ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), rcx); 853 __ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), rcx);
1493 __ j(not_equal, not_numbers); // Argument in rdx is not a number. 854 __ j(not_equal, not_numbers); // Argument in rdx is not a number.
1494 __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset)); 855 __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
1495 // Load operand in rax into xmm1, or branch to not_numbers. 856 // Load operand in rax into xmm1, or branch to not_numbers.
(...skipping 10 matching lines...) Expand all
1506 __ Cvtlsi2sd(xmm0, kScratchRegister); 867 __ Cvtlsi2sd(xmm0, kScratchRegister);
1507 __ JumpIfNotSmi(rax, &load_nonsmi_rax); 868 __ JumpIfNotSmi(rax, &load_nonsmi_rax);
1508 869
1509 __ bind(&load_smi_rax); 870 __ bind(&load_smi_rax);
1510 __ SmiToInteger32(kScratchRegister, rax); 871 __ SmiToInteger32(kScratchRegister, rax);
1511 __ Cvtlsi2sd(xmm1, kScratchRegister); 872 __ Cvtlsi2sd(xmm1, kScratchRegister);
1512 __ bind(&done); 873 __ bind(&done);
1513 } 874 }
1514 875
1515 876
// Tries to convert |first| and |second| to smis losslessly; see the
// declaration in FloatingPointHelper for the full contract.  At least
// one operand is known to be a non-smi on entry.  Heap numbers are
// converted via int64 truncation and a round-trip bit comparison;
// undefined converts to smi 0 when convert_undefined allows it.
void FloatingPointHelper::NumbersToSmis(MacroAssembler* masm,
                                        Register first,
                                        Register second,
                                        Register scratch1,
                                        Register scratch2,
                                        Register scratch3,
                                        Label* on_success,
                                        Label* on_not_smis,
                                        ConvertUndefined convert_undefined) {
  Register heap_number_map = scratch3;
  Register smi_result = scratch1;
  Label done, maybe_undefined_first, maybe_undefined_second, first_done;

  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);

  Label first_smi;
  __ JumpIfSmi(first, &first_smi, Label::kNear);
  __ cmpq(FieldOperand(first, HeapObject::kMapOffset), heap_number_map);
  __ j(not_equal,
       (convert_undefined == CONVERT_UNDEFINED_TO_ZERO)
           ? &maybe_undefined_first
           : on_not_smis);
  // Convert HeapNumber to smi if possible.
  __ movsd(xmm0, FieldOperand(first, HeapNumber::kValueOffset));
  __ movq(scratch2, xmm0);
  __ cvttsd2siq(smi_result, xmm0);
  // Check if conversion was successful by converting back and
  // comparing to the original double's bits.
  __ Cvtlsi2sd(xmm1, smi_result);
  __ movq(kScratchRegister, xmm1);
  __ cmpq(scratch2, kScratchRegister);
  __ j(not_equal, on_not_smis);
  __ Integer32ToSmi(first, smi_result);

  __ bind(&first_done);
  // First operand is now a smi; if the second already is too, we're done.
  __ JumpIfSmi(second, (on_success != NULL) ? on_success : &done);
  __ bind(&first_smi);
  // The precondition guarantees the second operand is a non-smi here.
  __ AssertNotSmi(second);
  __ cmpq(FieldOperand(second, HeapObject::kMapOffset), heap_number_map);
  __ j(not_equal,
       (convert_undefined == CONVERT_UNDEFINED_TO_ZERO)
           ? &maybe_undefined_second
           : on_not_smis);
  // Convert second to smi, if possible.
  __ movsd(xmm0, FieldOperand(second, HeapNumber::kValueOffset));
  __ movq(scratch2, xmm0);
  __ cvttsd2siq(smi_result, xmm0);
  __ Cvtlsi2sd(xmm1, smi_result);
  __ movq(kScratchRegister, xmm1);
  __ cmpq(scratch2, kScratchRegister);
  __ j(not_equal, on_not_smis);
  __ Integer32ToSmi(second, smi_result);
  if (on_success != NULL) {
    __ jmp(on_success);
  } else {
    __ jmp(&done);
  }

  // Undefined converts to smi zero (only reached when allowed above).
  __ bind(&maybe_undefined_first);
  __ CompareRoot(first, Heap::kUndefinedValueRootIndex);
  __ j(not_equal, on_not_smis);
  __ xor_(first, first);
  __ jmp(&first_done);

  __ bind(&maybe_undefined_second);
  __ CompareRoot(second, Heap::kUndefinedValueRootIndex);
  __ j(not_equal, on_not_smis);
  __ xor_(second, second);
  if (on_success != NULL) {
    __ jmp(on_success);
  }
  // Else: fall through.

  __ bind(&done);
}
1591
1592
1593 void MathPowStub::Generate(MacroAssembler* masm) { 877 void MathPowStub::Generate(MacroAssembler* masm) {
1594 const Register exponent = rdx; 878 const Register exponent = rdx;
1595 const Register base = rax; 879 const Register base = rax;
1596 const Register scratch = rcx; 880 const Register scratch = rcx;
1597 const XMMRegister double_result = xmm3; 881 const XMMRegister double_result = xmm3;
1598 const XMMRegister double_base = xmm2; 882 const XMMRegister double_base = xmm2;
1599 const XMMRegister double_exponent = xmm1; 883 const XMMRegister double_exponent = xmm1;
1600 const XMMRegister double_scratch = xmm4; 884 const XMMRegister double_scratch = xmm4;
1601 885
1602 Label call_runtime, done, exponent_not_smi, int_exponent; 886 Label call_runtime, done, exponent_not_smi, int_exponent;
(...skipping 1952 matching lines...) Expand 10 before | Expand all | Expand 10 after
3555 2839
3556 2840
3557 void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) { 2841 void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
3558 CEntryStub::GenerateAheadOfTime(isolate); 2842 CEntryStub::GenerateAheadOfTime(isolate);
3559 StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate); 2843 StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
3560 StubFailureTrampolineStub::GenerateAheadOfTime(isolate); 2844 StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
3561 // It is important that the store buffer overflow stubs are generated first. 2845 // It is important that the store buffer overflow stubs are generated first.
3562 RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate); 2846 RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate);
3563 ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate); 2847 ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
3564 CreateAllocationSiteStub::GenerateAheadOfTime(isolate); 2848 CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
2849 BinaryOpStub::GenerateAheadOfTime(isolate);
3565 } 2850 }
3566 2851
3567 2852
3568 void CodeStub::GenerateFPStubs(Isolate* isolate) { 2853 void CodeStub::GenerateFPStubs(Isolate* isolate) {
3569 } 2854 }
3570 2855
3571 2856
3572 void CEntryStub::GenerateAheadOfTime(Isolate* isolate) { 2857 void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
3573 CEntryStub stub(1, kDontSaveFPRegs); 2858 CEntryStub stub(1, kDontSaveFPRegs);
3574 stub.GetCode(isolate)->set_is_pregenerated(true); 2859 stub.GetCode(isolate)->set_is_pregenerated(true);
(...skipping 3066 matching lines...) Expand 10 before | Expand all | Expand 10 after
6641 __ bind(&fast_elements_case); 5926 __ bind(&fast_elements_case);
6642 GenerateCase(masm, FAST_ELEMENTS); 5927 GenerateCase(masm, FAST_ELEMENTS);
6643 } 5928 }
6644 5929
6645 5930
6646 #undef __ 5931 #undef __
6647 5932
6648 } } // namespace v8::internal 5933 } } // namespace v8::internal
6649 5934
6650 #endif // V8_TARGET_ARCH_X64 5935 #endif // V8_TARGET_ARCH_X64
OLDNEW
« no previous file with comments | « src/v8-counters.h ('k') | src/x64/full-codegen-x64.cc » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698