Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(193)

Side by Side Diff: src/crankshaft/s390/lithium-codegen-s390.cc

Issue 2161543002: [turbofan] Add support for eager/soft deoptimization reasons. (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Do the ports properly Created 4 years, 5 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « src/crankshaft/s390/lithium-codegen-s390.h ('k') | src/crankshaft/x64/lithium-codegen-x64.h » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright 2014 the V8 project authors. All rights reserved. 1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // 2 //
3 // Use of this source code is governed by a BSD-style license that can be 3 // Use of this source code is governed by a BSD-style license that can be
4 // found in the LICENSE file. 4 // found in the LICENSE file.
5 5
6 #include "src/crankshaft/s390/lithium-codegen-s390.h" 6 #include "src/crankshaft/s390/lithium-codegen-s390.h"
7 7
8 #include "src/base/bits.h" 8 #include "src/base/bits.h"
9 #include "src/code-factory.h" 9 #include "src/code-factory.h"
10 #include "src/code-stubs.h" 10 #include "src/code-stubs.h"
(...skipping 657 matching lines...) Expand 10 before | Expand all | Expand 10 after
668 WriteTranslation(environment, &translation); 668 WriteTranslation(environment, &translation);
669 int deoptimization_index = deoptimizations_.length(); 669 int deoptimization_index = deoptimizations_.length();
670 int pc_offset = masm()->pc_offset(); 670 int pc_offset = masm()->pc_offset();
671 environment->Register(deoptimization_index, translation.index(), 671 environment->Register(deoptimization_index, translation.index(),
672 (mode == Safepoint::kLazyDeopt) ? pc_offset : -1); 672 (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
673 deoptimizations_.Add(environment, zone()); 673 deoptimizations_.Add(environment, zone());
674 } 674 }
675 } 675 }
676 676
677 void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr, 677 void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr,
678 Deoptimizer::DeoptReason deopt_reason, 678 DeoptimizeReason deopt_reason,
679 Deoptimizer::BailoutType bailout_type, 679 Deoptimizer::BailoutType bailout_type,
680 CRegister cr) { 680 CRegister cr) {
681 LEnvironment* environment = instr->environment(); 681 LEnvironment* environment = instr->environment();
682 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); 682 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
683 DCHECK(environment->HasBeenRegistered()); 683 DCHECK(environment->HasBeenRegistered());
684 int id = environment->deoptimization_index(); 684 int id = environment->deoptimization_index();
685 Address entry = 685 Address entry =
686 Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type); 686 Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
687 if (entry == NULL) { 687 if (entry == NULL) {
688 Abort(kBailoutWasNotPrepared); 688 Abort(kBailoutWasNotPrepared);
(...skipping 72 matching lines...) Expand 10 before | Expand all | Expand 10 after
761 if (FLAG_trace_deopt || isolate()->is_profiling() || 761 if (FLAG_trace_deopt || isolate()->is_profiling() ||
762 jump_table_.is_empty() || 762 jump_table_.is_empty() ||
763 !table_entry.IsEquivalentTo(jump_table_.last())) { 763 !table_entry.IsEquivalentTo(jump_table_.last())) {
764 jump_table_.Add(table_entry, zone()); 764 jump_table_.Add(table_entry, zone());
765 } 765 }
766 __ b(cond, &jump_table_.last().label /*, cr*/); 766 __ b(cond, &jump_table_.last().label /*, cr*/);
767 } 767 }
768 } 768 }
769 769
770 void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr, 770 void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr,
771 Deoptimizer::DeoptReason deopt_reason, 771 DeoptimizeReason deopt_reason, CRegister cr) {
772 CRegister cr) {
773 Deoptimizer::BailoutType bailout_type = 772 Deoptimizer::BailoutType bailout_type =
774 info()->IsStub() ? Deoptimizer::LAZY : Deoptimizer::EAGER; 773 info()->IsStub() ? Deoptimizer::LAZY : Deoptimizer::EAGER;
775 DeoptimizeIf(cond, instr, deopt_reason, bailout_type, cr); 774 DeoptimizeIf(cond, instr, deopt_reason, bailout_type, cr);
776 } 775 }
777 776
778 void LCodeGen::RecordSafepointWithLazyDeopt(LInstruction* instr, 777 void LCodeGen::RecordSafepointWithLazyDeopt(LInstruction* instr,
779 SafepointMode safepoint_mode) { 778 SafepointMode safepoint_mode) {
780 if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) { 779 if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
781 RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt); 780 RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
782 } else { 781 } else {
(...skipping 88 matching lines...) Expand 10 before | Expand all | Expand 10 after
871 Label dividend_is_not_negative, done; 870 Label dividend_is_not_negative, done;
872 if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) { 871 if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
873 __ CmpP(dividend, Operand::Zero()); 872 __ CmpP(dividend, Operand::Zero());
874 __ bge(&dividend_is_not_negative, Label::kNear); 873 __ bge(&dividend_is_not_negative, Label::kNear);
875 if (shift) { 874 if (shift) {
876 // Note that this is correct even for kMinInt operands. 875 // Note that this is correct even for kMinInt operands.
877 __ LoadComplementRR(dividend, dividend); 876 __ LoadComplementRR(dividend, dividend);
878 __ ExtractBitRange(dividend, dividend, shift - 1, 0); 877 __ ExtractBitRange(dividend, dividend, shift - 1, 0);
879 __ LoadComplementRR(dividend, dividend); 878 __ LoadComplementRR(dividend, dividend);
880 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { 879 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
881 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero); 880 DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
882 } 881 }
883 } else if (!hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { 882 } else if (!hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
884 __ mov(dividend, Operand::Zero()); 883 __ mov(dividend, Operand::Zero());
885 } else { 884 } else {
886 DeoptimizeIf(al, instr, Deoptimizer::kMinusZero); 885 DeoptimizeIf(al, instr, DeoptimizeReason::kMinusZero);
887 } 886 }
888 __ b(&done, Label::kNear); 887 __ b(&done, Label::kNear);
889 } 888 }
890 889
891 __ bind(&dividend_is_not_negative); 890 __ bind(&dividend_is_not_negative);
892 if (shift) { 891 if (shift) {
893 __ ExtractBitRange(dividend, dividend, shift - 1, 0); 892 __ ExtractBitRange(dividend, dividend, shift - 1, 0);
894 } else { 893 } else {
895 __ mov(dividend, Operand::Zero()); 894 __ mov(dividend, Operand::Zero());
896 } 895 }
897 __ bind(&done); 896 __ bind(&done);
898 } 897 }
899 898
// Emits code for `dividend % divisor` where the divisor is a compile-time
// constant. Avoids a hardware divide by using magic-number multiplication
// (TruncatingDiv) and then reconstructing the remainder.
void LCodeGen::DoModByConstI(LModByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(!dividend.is(result));

  if (divisor == 0) {
    // x % 0 always fails; deoptimize unconditionally.
    DeoptimizeIf(al, instr, DeoptimizeReason::kDivisionByZero);
    return;
  }

  // result = dividend - (dividend / |divisor|) * |divisor|.
  // The remainder's sign follows the dividend, so |divisor| is sufficient.
  __ TruncatingDiv(result, dividend, Abs(divisor));
  __ mov(ip, Operand(Abs(divisor)));
  __ Mul(result, result, ip);
  __ SubP(result, dividend, result /*, LeaveOE, SetRC*/);

  // Check for negative zero: a zero remainder from a negative dividend
  // would be -0, which must deoptimize when -0 is observable.
  HMod* hmod = instr->hydrogen();
  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label remainder_not_zero;
    __ bne(&remainder_not_zero, Label::kNear /*, cr0*/);
    __ Cmp32(dividend, Operand::Zero());
    DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
    __ bind(&remainder_not_zero);
  }
}
926 925
// Emits code for `left % right` with a runtime (non-constant) right operand,
// using the s390 `dr` instruction on the implicit r0/r1 register pair.
void LCodeGen::DoModI(LModI* instr) {
  HMod* hmod = instr->hydrogen();
  Register left_reg = ToRegister(instr->left());
  Register right_reg = ToRegister(instr->right());
  Register result_reg = ToRegister(instr->result());
  Label done;

  // Check for x % 0.
  if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
    __ Cmp32(right_reg, Operand::Zero());
    DeoptimizeIf(eq, instr, DeoptimizeReason::kDivisionByZero);
  }

  // Check for kMinInt % -1, dr will return undefined, which is not what we
  // want. We have to deopt if we care about -0, because we can't return that.
  if (hmod->CheckFlag(HValue::kCanOverflow)) {
    Label no_overflow_possible;
    __ Cmp32(left_reg, Operand(kMinInt));
    __ bne(&no_overflow_possible, Label::kNear);
    __ Cmp32(right_reg, Operand(-1));
    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
    } else {
      // kMinInt % -1 == 0 mathematically; produce 0 without dividing.
      __ b(ne, &no_overflow_possible, Label::kNear);
      __ mov(result_reg, Operand::Zero());
      __ b(&done, Label::kNear);
    }
    __ bind(&no_overflow_possible);
  }

  // Divide instruction dr will implicitly use register pair
  // r0 & r1 below.
  DCHECK(!left_reg.is(r1));
  DCHECK(!right_reg.is(r1));
  DCHECK(!result_reg.is(r1));
  __ LoadRR(r0, left_reg);
  __ srda(r0, Operand(32));  // Sign-extend the dividend into the r0:r1 pair.
  __ dr(r0, right_reg);      // R0:R1 = R1 / divisor - R0 remainder

  __ LoadAndTestP_ExtendSrc(result_reg, r0);  // Copy remainder to resultreg

  // If we care about -0, test if the dividend is <0 and the result is 0.
  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
    __ bne(&done, Label::kNear);
    __ Cmp32(left_reg, Operand::Zero());
    DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
  }

  __ bind(&done);
}
977 976
978 void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) { 977 void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
979 Register dividend = ToRegister(instr->dividend()); 978 Register dividend = ToRegister(instr->dividend());
980 int32_t divisor = instr->divisor(); 979 int32_t divisor = instr->divisor();
981 Register result = ToRegister(instr->result()); 980 Register result = ToRegister(instr->result());
982 DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor))); 981 DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
983 DCHECK(!result.is(dividend)); 982 DCHECK(!result.is(dividend));
984 983
985 // Check for (0 / -x) that will produce negative zero. 984 // Check for (0 / -x) that will produce negative zero.
986 HDiv* hdiv = instr->hydrogen(); 985 HDiv* hdiv = instr->hydrogen();
987 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { 986 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
988 __ Cmp32(dividend, Operand::Zero()); 987 __ Cmp32(dividend, Operand::Zero());
989 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero); 988 DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
990 } 989 }
991 // Check for (kMinInt / -1). 990 // Check for (kMinInt / -1).
992 if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) { 991 if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
993 __ Cmp32(dividend, Operand(0x80000000)); 992 __ Cmp32(dividend, Operand(0x80000000));
994 DeoptimizeIf(eq, instr, Deoptimizer::kOverflow); 993 DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow);
995 } 994 }
996 995
997 int32_t shift = WhichPowerOf2Abs(divisor); 996 int32_t shift = WhichPowerOf2Abs(divisor);
998 997
999 // Deoptimize if remainder will not be 0. 998 // Deoptimize if remainder will not be 0.
1000 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && shift) { 999 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && shift) {
1001 __ TestBitRange(dividend, shift - 1, 0, r0); 1000 __ TestBitRange(dividend, shift - 1, 0, r0);
1002 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision, cr0); 1001 DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision, cr0);
1003 } 1002 }
1004 1003
1005 if (divisor == -1) { // Nice shortcut, not needed for correctness. 1004 if (divisor == -1) { // Nice shortcut, not needed for correctness.
1006 __ LoadComplementRR(result, dividend); 1005 __ LoadComplementRR(result, dividend);
1007 return; 1006 return;
1008 } 1007 }
1009 if (shift == 0) { 1008 if (shift == 0) {
1010 __ LoadRR(result, dividend); 1009 __ LoadRR(result, dividend);
1011 } else { 1010 } else {
1012 if (shift == 1) { 1011 if (shift == 1) {
(...skipping 11 matching lines...) Expand all
1024 if (divisor < 0) __ LoadComplementRR(result, result); 1023 if (divisor < 0) __ LoadComplementRR(result, result);
1025 } 1024 }
1026 1025
// Emits code for truncating division `dividend / divisor` with a
// compile-time-constant divisor, via magic-number multiplication.
void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(!dividend.is(result));

  if (divisor == 0) {
    // x / 0 always fails; deoptimize unconditionally.
    DeoptimizeIf(al, instr, DeoptimizeReason::kDivisionByZero);
    return;
  }

  // Check for (0 / -x) that will produce negative zero.
  HDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    __ Cmp32(dividend, Operand::Zero());
    DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
  }

  // Divide by |divisor| and fix up the sign afterwards.
  __ TruncatingDiv(result, dividend, Abs(divisor));
  if (divisor < 0) __ LoadComplementRR(result, result);

  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
    // Deoptimize unless the division was exact
    // (result * divisor == dividend).
    Register scratch = scratch0();
    __ mov(ip, Operand(divisor));
    __ Mul(scratch, result, ip);
    __ Cmp32(scratch, dividend);
    DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision);
  }
}
1056 1055
// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
// Emits code for truncating division `dividend / divisor` with a runtime
// divisor, using the s390 `dr` instruction on the implicit r0/r1 pair.
void LCodeGen::DoDivI(LDivI* instr) {
  HBinaryOperation* hdiv = instr->hydrogen();
  const Register dividend = ToRegister(instr->dividend());
  const Register divisor = ToRegister(instr->divisor());
  Register result = ToRegister(instr->result());

  DCHECK(!dividend.is(result));
  DCHECK(!divisor.is(result));

  // Check for x / 0.
  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
    __ Cmp32(divisor, Operand::Zero());
    DeoptimizeIf(eq, instr, DeoptimizeReason::kDivisionByZero);
  }

  // Check for (0 / -x) that will produce negative zero.
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label dividend_not_zero;
    __ Cmp32(dividend, Operand::Zero());
    __ bne(&dividend_not_zero, Label::kNear);
    __ Cmp32(divisor, Operand::Zero());
    DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
    __ bind(&dividend_not_zero);
  }

  // Check for (kMinInt / -1): the quotient overflows int32.
  if (hdiv->CheckFlag(HValue::kCanOverflow)) {
    Label dividend_not_min_int;
    __ Cmp32(dividend, Operand(kMinInt));
    __ bne(&dividend_not_min_int, Label::kNear);
    __ Cmp32(divisor, Operand(-1));
    DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow);
    __ bind(&dividend_not_min_int);
  }

  __ LoadRR(r0, dividend);
  __ srda(r0, Operand(32));  // Sign-extend the dividend into the r0:r1 pair.
  __ dr(r0, divisor);  // R0:R1 = R1 / divisor - R0 remainder - R1 quotient

  __ LoadAndTestP_ExtendSrc(result, r1);  // Move quotient to result register

  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
    // Deoptimize if remainder is not 0.
    __ Cmp32(r0, Operand::Zero());
    DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision);
  }
}
1105 1104
// Emits code for flooring division by a compile-time power-of-two divisor
// using arithmetic shifts. Negative divisors negate the dividend first and
// need deopt checks for kMinInt overflow and -0.
void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
  HBinaryOperation* hdiv = instr->hydrogen();
  Register dividend = ToRegister(instr->dividend());
  Register result = ToRegister(instr->result());
  int32_t divisor = instr->divisor();
  bool can_overflow = hdiv->CheckFlag(HValue::kLeftCanBeMinInt);

  // If the divisor is positive, things are easy: There can be no deopts and we
  // can simply do an arithmetic right shift.
  int32_t shift = WhichPowerOf2Abs(divisor);
  if (divisor > 0) {
    if (shift || !result.is(dividend)) {
      __ ShiftRightArith(result, dividend, Operand(shift));
#if V8_TARGET_ARCH_S390X
      // Re-sign-extend the 32-bit shift result to 64 bits.
      __ lgfr(result, result);
#endif
    }
    return;
  }

  // If the divisor is negative, we have to negate and handle edge cases.
#if V8_TARGET_ARCH_S390X
  if (divisor == -1 && can_overflow) {
    // kMinInt / -1 overflows int32; deoptimize.
    __ Cmp32(dividend, Operand(0x80000000));
    DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow);
  }
#endif

  __ LoadComplementRR(result, dividend);
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // A zero dividend with a negative divisor yields -0; deoptimize.
    DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, cr0);
  }

// If the negation could not overflow, simply shifting is OK.
#if !V8_TARGET_ARCH_S390X
  if (!can_overflow) {
#endif
    if (shift) {
      __ ShiftRightArithP(result, result, Operand(shift));
    }
    return;
#if !V8_TARGET_ARCH_S390X
  }

  // Dividing by -1 is basically negation, unless we overflow.
  if (divisor == -1) {
    DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow, cr0);
    return;
  }

  // 32-bit path with possible negation overflow: if the negation overflowed,
  // the mathematically correct quotient is kMinInt / divisor.
  Label overflow_label, done;
  __ b(overflow, &overflow_label, Label::kNear);
  __ ShiftRightArith(result, result, Operand(shift));
#if V8_TARGET_ARCH_S390X
  __ lgfr(result, result);
#endif
  __ b(&done, Label::kNear);
  __ bind(&overflow_label);
  __ mov(result, Operand(kMinInt / divisor));
  __ bind(&done);
#endif
}
1168 1167
1169 void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) { 1168 void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
1170 Register dividend = ToRegister(instr->dividend()); 1169 Register dividend = ToRegister(instr->dividend());
1171 int32_t divisor = instr->divisor(); 1170 int32_t divisor = instr->divisor();
1172 Register result = ToRegister(instr->result()); 1171 Register result = ToRegister(instr->result());
1173 DCHECK(!dividend.is(result)); 1172 DCHECK(!dividend.is(result));
1174 1173
1175 if (divisor == 0) { 1174 if (divisor == 0) {
1176 DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero); 1175 DeoptimizeIf(al, instr, DeoptimizeReason::kDivisionByZero);
1177 return; 1176 return;
1178 } 1177 }
1179 1178
1180 // Check for (0 / -x) that will produce negative zero. 1179 // Check for (0 / -x) that will produce negative zero.
1181 HMathFloorOfDiv* hdiv = instr->hydrogen(); 1180 HMathFloorOfDiv* hdiv = instr->hydrogen();
1182 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { 1181 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
1183 __ Cmp32(dividend, Operand::Zero()); 1182 __ Cmp32(dividend, Operand::Zero());
1184 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero); 1183 DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
1185 } 1184 }
1186 1185
1187 // Easy case: We need no dynamic check for the dividend and the flooring 1186 // Easy case: We need no dynamic check for the dividend and the flooring
1188 // division is the same as the truncating division. 1187 // division is the same as the truncating division.
1189 if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) || 1188 if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
1190 (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) { 1189 (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
1191 __ TruncatingDiv(result, dividend, Abs(divisor)); 1190 __ TruncatingDiv(result, dividend, Abs(divisor));
1192 if (divisor < 0) __ LoadComplementRR(result, result); 1191 if (divisor < 0) __ LoadComplementRR(result, result);
1193 return; 1192 return;
1194 } 1193 }
(...skipping 22 matching lines...) Expand all
1217 const Register dividend = ToRegister(instr->dividend()); 1216 const Register dividend = ToRegister(instr->dividend());
1218 const Register divisor = ToRegister(instr->divisor()); 1217 const Register divisor = ToRegister(instr->divisor());
1219 Register result = ToRegister(instr->result()); 1218 Register result = ToRegister(instr->result());
1220 1219
1221 DCHECK(!dividend.is(result)); 1220 DCHECK(!dividend.is(result));
1222 DCHECK(!divisor.is(result)); 1221 DCHECK(!divisor.is(result));
1223 1222
1224 // Check for x / 0. 1223 // Check for x / 0.
1225 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { 1224 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
1226 __ Cmp32(divisor, Operand::Zero()); 1225 __ Cmp32(divisor, Operand::Zero());
1227 DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero); 1226 DeoptimizeIf(eq, instr, DeoptimizeReason::kDivisionByZero);
1228 } 1227 }
1229 1228
1230 // Check for (0 / -x) that will produce negative zero. 1229 // Check for (0 / -x) that will produce negative zero.
1231 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { 1230 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
1232 Label dividend_not_zero; 1231 Label dividend_not_zero;
1233 __ Cmp32(dividend, Operand::Zero()); 1232 __ Cmp32(dividend, Operand::Zero());
1234 __ bne(&dividend_not_zero, Label::kNear); 1233 __ bne(&dividend_not_zero, Label::kNear);
1235 __ Cmp32(divisor, Operand::Zero()); 1234 __ Cmp32(divisor, Operand::Zero());
1236 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero); 1235 DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
1237 __ bind(&dividend_not_zero); 1236 __ bind(&dividend_not_zero);
1238 } 1237 }
1239 1238
1240 // Check for (kMinInt / -1). 1239 // Check for (kMinInt / -1).
1241 if (hdiv->CheckFlag(HValue::kCanOverflow)) { 1240 if (hdiv->CheckFlag(HValue::kCanOverflow)) {
1242 Label no_overflow_possible; 1241 Label no_overflow_possible;
1243 __ Cmp32(dividend, Operand(kMinInt)); 1242 __ Cmp32(dividend, Operand(kMinInt));
1244 __ bne(&no_overflow_possible, Label::kNear); 1243 __ bne(&no_overflow_possible, Label::kNear);
1245 __ Cmp32(divisor, Operand(-1)); 1244 __ Cmp32(divisor, Operand(-1));
1246 if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { 1245 if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
1247 DeoptimizeIf(eq, instr, Deoptimizer::kOverflow); 1246 DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow);
1248 } else { 1247 } else {
1249 __ bne(&no_overflow_possible, Label::kNear); 1248 __ bne(&no_overflow_possible, Label::kNear);
1250 __ LoadRR(result, dividend); 1249 __ LoadRR(result, dividend);
1251 } 1250 }
1252 __ bind(&no_overflow_possible); 1251 __ bind(&no_overflow_possible);
1253 } 1252 }
1254 1253
1255 __ LoadRR(r0, dividend); 1254 __ LoadRR(r0, dividend);
1256 __ srda(r0, Operand(32)); 1255 __ srda(r0, Operand(32));
1257 __ dr(r0, divisor); // R0:R1 = R1 / divisor - R0 remainder - R1 quotient 1256 __ dr(r0, divisor); // R0:R1 = R1 / divisor - R0 remainder - R1 quotient
(...skipping 55 matching lines...) Expand 10 before | Expand all | Expand 10 after
1313 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero); 1312 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
1314 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); 1313 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1315 1314
1316 if (right_op->IsConstantOperand()) { 1315 if (right_op->IsConstantOperand()) {
1317 int32_t constant = ToInteger32(LConstantOperand::cast(right_op)); 1316 int32_t constant = ToInteger32(LConstantOperand::cast(right_op));
1318 1317
1319 if (bailout_on_minus_zero && (constant < 0)) { 1318 if (bailout_on_minus_zero && (constant < 0)) {
1320 // The case of a null constant will be handled separately. 1319 // The case of a null constant will be handled separately.
1321 // If constant is negative and left is null, the result should be -0. 1320 // If constant is negative and left is null, the result should be -0.
1322 __ CmpP(left, Operand::Zero()); 1321 __ CmpP(left, Operand::Zero());
1323 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero); 1322 DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
1324 } 1323 }
1325 1324
1326 switch (constant) { 1325 switch (constant) {
1327 case -1: 1326 case -1:
1328 if (can_overflow) { 1327 if (can_overflow) {
1329 #if V8_TARGET_ARCH_S390X 1328 #if V8_TARGET_ARCH_S390X
1330 if (instr->hydrogen()->representation().IsSmi()) { 1329 if (instr->hydrogen()->representation().IsSmi()) {
1331 #endif 1330 #endif
1332 __ LoadComplementRR(result, left); 1331 __ LoadComplementRR(result, left);
1333 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow); 1332 DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
1334 #if V8_TARGET_ARCH_S390X 1333 #if V8_TARGET_ARCH_S390X
1335 } else { 1334 } else {
1336 __ LoadComplementRR(result, left); 1335 __ LoadComplementRR(result, left);
1337 __ TestIfInt32(result, r0); 1336 __ TestIfInt32(result, r0);
1338 DeoptimizeIf(ne, instr, Deoptimizer::kOverflow); 1337 DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow);
1339 } 1338 }
1340 #endif 1339 #endif
1341 } else { 1340 } else {
1342 __ LoadComplementRR(result, left); 1341 __ LoadComplementRR(result, left);
1343 } 1342 }
1344 break; 1343 break;
1345 case 0: 1344 case 0:
1346 if (bailout_on_minus_zero) { 1345 if (bailout_on_minus_zero) {
1347 // If left is strictly negative and the constant is null, the 1346 // If left is strictly negative and the constant is null, the
1348 // result is -0. Deoptimize if required, otherwise return 0. 1347 // result is -0. Deoptimize if required, otherwise return 0.
1349 #if V8_TARGET_ARCH_S390X 1348 #if V8_TARGET_ARCH_S390X
1350 if (instr->hydrogen()->representation().IsSmi()) { 1349 if (instr->hydrogen()->representation().IsSmi()) {
1351 #endif 1350 #endif
1352 __ Cmp32(left, Operand::Zero()); 1351 __ Cmp32(left, Operand::Zero());
1353 #if V8_TARGET_ARCH_S390X 1352 #if V8_TARGET_ARCH_S390X
1354 } else { 1353 } else {
1355 __ Cmp32(left, Operand::Zero()); 1354 __ Cmp32(left, Operand::Zero());
1356 } 1355 }
1357 #endif 1356 #endif
1358 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero); 1357 DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
1359 } 1358 }
1360 __ LoadImmP(result, Operand::Zero()); 1359 __ LoadImmP(result, Operand::Zero());
1361 break; 1360 break;
1362 case 1: 1361 case 1:
1363 __ Move(result, left); 1362 __ Move(result, left);
1364 break; 1363 break;
1365 default: 1364 default:
1366 // Multiplying by powers of two and powers of two plus or minus 1365 // Multiplying by powers of two and powers of two plus or minus
1367 // one can be done faster with shifted operands. 1366 // one can be done faster with shifted operands.
1368 // For other constants we emit standard code. 1367 // For other constants we emit standard code.
(...skipping 33 matching lines...) Expand 10 before | Expand all | Expand 10 after
1402 // result = left * right. 1401 // result = left * right.
1403 if (instr->hydrogen()->representation().IsSmi()) { 1402 if (instr->hydrogen()->representation().IsSmi()) {
1404 __ SmiUntag(result, left); 1403 __ SmiUntag(result, left);
1405 __ SmiUntag(scratch, right); 1404 __ SmiUntag(scratch, right);
1406 __ msgr(result, scratch); 1405 __ msgr(result, scratch);
1407 } else { 1406 } else {
1408 __ LoadRR(result, left); 1407 __ LoadRR(result, left);
1409 __ msgr(result, right); 1408 __ msgr(result, right);
1410 } 1409 }
1411 __ TestIfInt32(result, r0); 1410 __ TestIfInt32(result, r0);
1412 DeoptimizeIf(ne, instr, Deoptimizer::kOverflow); 1411 DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow);
1413 if (instr->hydrogen()->representation().IsSmi()) { 1412 if (instr->hydrogen()->representation().IsSmi()) {
1414 __ SmiTag(result); 1413 __ SmiTag(result);
1415 } 1414 }
1416 #else 1415 #else
1417 // r0:scratch = scratch * right 1416 // r0:scratch = scratch * right
1418 if (instr->hydrogen()->representation().IsSmi()) { 1417 if (instr->hydrogen()->representation().IsSmi()) {
1419 __ SmiUntag(scratch, left); 1418 __ SmiUntag(scratch, left);
1420 __ mr_z(r0, right); 1419 __ mr_z(r0, right);
1421 __ LoadRR(result, scratch); 1420 __ LoadRR(result, scratch);
1422 } else { 1421 } else {
1423 // r0:scratch = scratch * right 1422 // r0:scratch = scratch * right
1424 __ LoadRR(scratch, left); 1423 __ LoadRR(scratch, left);
1425 __ mr_z(r0, right); 1424 __ mr_z(r0, right);
1426 __ LoadRR(result, scratch); 1425 __ LoadRR(result, scratch);
1427 } 1426 }
1428 __ TestIfInt32(r0, result, scratch); 1427 __ TestIfInt32(r0, result, scratch);
1429 DeoptimizeIf(ne, instr, Deoptimizer::kOverflow); 1428 DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow);
1430 #endif 1429 #endif
1431 } else { 1430 } else {
1432 if (instr->hydrogen()->representation().IsSmi()) { 1431 if (instr->hydrogen()->representation().IsSmi()) {
1433 __ SmiUntag(result, left); 1432 __ SmiUntag(result, left);
1434 __ Mul(result, result, right); 1433 __ Mul(result, result, right);
1435 } else { 1434 } else {
1436 __ Mul(result, left, right); 1435 __ Mul(result, left, right);
1437 } 1436 }
1438 } 1437 }
1439 1438
1440 if (bailout_on_minus_zero) { 1439 if (bailout_on_minus_zero) {
1441 Label done; 1440 Label done;
1442 #if V8_TARGET_ARCH_S390X 1441 #if V8_TARGET_ARCH_S390X
1443 if (instr->hydrogen()->representation().IsSmi()) { 1442 if (instr->hydrogen()->representation().IsSmi()) {
1444 #endif 1443 #endif
1445 __ XorP(r0, left, right); 1444 __ XorP(r0, left, right);
1446 __ LoadAndTestRR(r0, r0); 1445 __ LoadAndTestRR(r0, r0);
1447 __ bge(&done, Label::kNear); 1446 __ bge(&done, Label::kNear);
1448 #if V8_TARGET_ARCH_S390X 1447 #if V8_TARGET_ARCH_S390X
1449 } else { 1448 } else {
1450 __ XorP(r0, left, right); 1449 __ XorP(r0, left, right);
1451 __ Cmp32(r0, Operand::Zero()); 1450 __ Cmp32(r0, Operand::Zero());
1452 __ bge(&done, Label::kNear); 1451 __ bge(&done, Label::kNear);
1453 } 1452 }
1454 #endif 1453 #endif
1455 // Bail out if the result is minus zero. 1454 // Bail out if the result is minus zero.
1456 __ CmpP(result, Operand::Zero()); 1455 __ CmpP(result, Operand::Zero());
1457 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero); 1456 DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
1458 __ bind(&done); 1457 __ bind(&done);
1459 } 1458 }
1460 } 1459 }
1461 } 1460 }
1462 1461
1463 void LCodeGen::DoBitI(LBitI* instr) { 1462 void LCodeGen::DoBitI(LBitI* instr) {
1464 LOperand* left_op = instr->left(); 1463 LOperand* left_op = instr->left();
1465 LOperand* right_op = instr->right(); 1464 LOperand* right_op = instr->right();
1466 DCHECK(left_op->IsRegister()); 1465 DCHECK(left_op->IsRegister());
1467 Register left = ToRegister(left_op); 1466 Register left = ToRegister(left_op);
(...skipping 80 matching lines...) Expand 10 before | Expand all | Expand 10 after
1548 __ ShiftRight(result, left, scratch); 1547 __ ShiftRight(result, left, scratch);
1549 #if V8_TARGET_ARCH_S390X 1548 #if V8_TARGET_ARCH_S390X
1550 __ lgfr(result, result); 1549 __ lgfr(result, result);
1551 #endif 1550 #endif
1552 if (instr->can_deopt()) { 1551 if (instr->can_deopt()) {
1553 #if V8_TARGET_ARCH_S390X 1552 #if V8_TARGET_ARCH_S390X
1554 __ ltgfr(result, result /*, SetRC*/); 1553 __ ltgfr(result, result /*, SetRC*/);
1555 #else 1554 #else
1556 __ ltr(result, result); // Set the <,==,> condition 1555 __ ltr(result, result); // Set the <,==,> condition
1557 #endif 1556 #endif
1558 DeoptimizeIf(lt, instr, Deoptimizer::kNegativeValue, cr0); 1557 DeoptimizeIf(lt, instr, DeoptimizeReason::kNegativeValue, cr0);
1559 } 1558 }
1560 break; 1559 break;
1561 case Token::SHL: 1560 case Token::SHL:
1562 __ ShiftLeft(result, left, scratch); 1561 __ ShiftLeft(result, left, scratch);
1563 #if V8_TARGET_ARCH_S390X 1562 #if V8_TARGET_ARCH_S390X
1564 __ lgfr(result, result); 1563 __ lgfr(result, result);
1565 #endif 1564 #endif
1566 break; 1565 break;
1567 default: 1566 default:
1568 UNREACHABLE(); 1567 UNREACHABLE();
(...skipping 26 matching lines...) Expand all
1595 break; 1594 break;
1596 case Token::SHR: 1595 case Token::SHR:
1597 if (shift_count != 0) { 1596 if (shift_count != 0) {
1598 __ ShiftRight(result, left, Operand(shift_count)); 1597 __ ShiftRight(result, left, Operand(shift_count));
1599 #if V8_TARGET_ARCH_S390X 1598 #if V8_TARGET_ARCH_S390X
1600 __ lgfr(result, result); 1599 __ lgfr(result, result);
1601 #endif 1600 #endif
1602 } else { 1601 } else {
1603 if (instr->can_deopt()) { 1602 if (instr->can_deopt()) {
1604 __ Cmp32(left, Operand::Zero()); 1603 __ Cmp32(left, Operand::Zero());
1605 DeoptimizeIf(lt, instr, Deoptimizer::kNegativeValue); 1604 DeoptimizeIf(lt, instr, DeoptimizeReason::kNegativeValue);
1606 } 1605 }
1607 __ Move(result, left); 1606 __ Move(result, left);
1608 } 1607 }
1609 break; 1608 break;
1610 case Token::SHL: 1609 case Token::SHL:
1611 if (shift_count != 0) { 1610 if (shift_count != 0) {
1612 #if V8_TARGET_ARCH_S390X 1611 #if V8_TARGET_ARCH_S390X
1613 if (instr->hydrogen_value()->representation().IsSmi()) { 1612 if (instr->hydrogen_value()->representation().IsSmi()) {
1614 __ ShiftLeftP(result, left, Operand(shift_count)); 1613 __ ShiftLeftP(result, left, Operand(shift_count));
1615 #else 1614 #else
1616 if (instr->hydrogen_value()->representation().IsSmi() && 1615 if (instr->hydrogen_value()->representation().IsSmi() &&
1617 instr->can_deopt()) { 1616 instr->can_deopt()) {
1618 if (shift_count != 1) { 1617 if (shift_count != 1) {
1619 __ ShiftLeft(result, left, Operand(shift_count - 1)); 1618 __ ShiftLeft(result, left, Operand(shift_count - 1));
1620 #if V8_TARGET_ARCH_S390X 1619 #if V8_TARGET_ARCH_S390X
1621 __ lgfr(result, result); 1620 __ lgfr(result, result);
1622 #endif 1621 #endif
1623 __ SmiTagCheckOverflow(result, result, scratch); 1622 __ SmiTagCheckOverflow(result, result, scratch);
1624 } else { 1623 } else {
1625 __ SmiTagCheckOverflow(result, left, scratch); 1624 __ SmiTagCheckOverflow(result, left, scratch);
1626 } 1625 }
1627 DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0); 1626 DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow, cr0);
1628 #endif 1627 #endif
1629 } else { 1628 } else {
1630 __ ShiftLeft(result, left, Operand(shift_count)); 1629 __ ShiftLeft(result, left, Operand(shift_count));
1631 #if V8_TARGET_ARCH_S390X 1630 #if V8_TARGET_ARCH_S390X
1632 __ lgfr(result, result); 1631 __ lgfr(result, result);
1633 #endif 1632 #endif
1634 } 1633 }
1635 } else { 1634 } else {
1636 __ Move(result, left); 1635 __ Move(result, left);
1637 } 1636 }
(...skipping 55 matching lines...) Expand 10 before | Expand all | Expand 10 after
1693 __ SubP_ExtendSrc(ToRegister(result), Upper32Mem); 1692 __ SubP_ExtendSrc(ToRegister(result), Upper32Mem);
1694 } 1693 }
1695 } 1694 }
1696 } 1695 }
1697 1696
1698 #if V8_TARGET_ARCH_S390X 1697 #if V8_TARGET_ARCH_S390X
1699 if (isInteger && checkOverflow) 1698 if (isInteger && checkOverflow)
1700 __ lgfr(ToRegister(result), ToRegister(result)); 1699 __ lgfr(ToRegister(result), ToRegister(result));
1701 #endif 1700 #endif
1702 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) { 1701 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
1703 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow); 1702 DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
1704 } 1703 }
1705 } 1704 }
1706 1705
1707 void LCodeGen::DoRSubI(LRSubI* instr) { 1706 void LCodeGen::DoRSubI(LRSubI* instr) {
1708 LOperand* left = instr->left(); 1707 LOperand* left = instr->left();
1709 LOperand* right = instr->right(); 1708 LOperand* right = instr->right();
1710 LOperand* result = instr->result(); 1709 LOperand* result = instr->result();
1711 1710
1712 DCHECK(!instr->hydrogen()->CheckFlag(HValue::kCanOverflow) && 1711 DCHECK(!instr->hydrogen()->CheckFlag(HValue::kCanOverflow) &&
1713 right->IsConstantOperand()); 1712 right->IsConstantOperand());
(...skipping 167 matching lines...) Expand 10 before | Expand all | Expand 10 after
1881 } 1880 }
1882 } 1881 }
1883 } 1882 }
1884 1883
1885 #if V8_TARGET_ARCH_S390X 1884 #if V8_TARGET_ARCH_S390X
1886 if (isInteger && checkOverflow) 1885 if (isInteger && checkOverflow)
1887 __ lgfr(ToRegister(result), ToRegister(result)); 1886 __ lgfr(ToRegister(result), ToRegister(result));
1888 #endif 1887 #endif
1889 // Doptimize on overflow 1888 // Doptimize on overflow
1890 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) { 1889 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
1891 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow); 1890 DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
1892 } 1891 }
1893 } 1892 }
1894 1893
1895 void LCodeGen::DoMathMinMax(LMathMinMax* instr) { 1894 void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
1896 LOperand* left = instr->left(); 1895 LOperand* left = instr->left();
1897 LOperand* right = instr->right(); 1896 LOperand* right = instr->right();
1898 HMathMinMax::Operation operation = instr->hydrogen()->operation(); 1897 HMathMinMax::Operation operation = instr->hydrogen()->operation();
1899 Condition cond = (operation == HMathMinMax::kMathMin) ? le : ge; 1898 Condition cond = (operation == HMathMinMax::kMathMin) ? le : ge;
1900 if (instr->hydrogen()->representation().IsSmiOrInteger32()) { 1899 if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
1901 Register left_reg = ToRegister(left); 1900 Register left_reg = ToRegister(left);
(...skipping 222 matching lines...) Expand 10 before | Expand all | Expand 10 after
2124 } 2123 }
2125 2124
2126 if (expected.Contains(ToBooleanICStub::SMI)) { 2125 if (expected.Contains(ToBooleanICStub::SMI)) {
2127 // Smis: 0 -> false, all other -> true. 2126 // Smis: 0 -> false, all other -> true.
2128 __ CmpP(reg, Operand::Zero()); 2127 __ CmpP(reg, Operand::Zero());
2129 __ beq(instr->FalseLabel(chunk_)); 2128 __ beq(instr->FalseLabel(chunk_));
2130 __ JumpIfSmi(reg, instr->TrueLabel(chunk_)); 2129 __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
2131 } else if (expected.NeedsMap()) { 2130 } else if (expected.NeedsMap()) {
2132 // If we need a map later and have a Smi -> deopt. 2131 // If we need a map later and have a Smi -> deopt.
2133 __ TestIfSmi(reg); 2132 __ TestIfSmi(reg);
2134 DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0); 2133 DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, cr0);
2135 } 2134 }
2136 2135
2137 const Register map = scratch0(); 2136 const Register map = scratch0();
2138 if (expected.NeedsMap()) { 2137 if (expected.NeedsMap()) {
2139 __ LoadP(map, FieldMemOperand(reg, HeapObject::kMapOffset)); 2138 __ LoadP(map, FieldMemOperand(reg, HeapObject::kMapOffset));
2140 2139
2141 if (expected.CanBeUndetectable()) { 2140 if (expected.CanBeUndetectable()) {
2142 // Undetectable -> false. 2141 // Undetectable -> false.
2143 __ tm(FieldMemOperand(map, Map::kBitFieldOffset), 2142 __ tm(FieldMemOperand(map, Map::kBitFieldOffset),
2144 Operand(1 << Map::kIsUndetectable)); 2143 Operand(1 << Map::kIsUndetectable));
(...skipping 43 matching lines...) Expand 10 before | Expand all | Expand 10 after
2188 __ cdbr(dbl_scratch, kDoubleRegZero); 2187 __ cdbr(dbl_scratch, kDoubleRegZero);
2189 __ bunordered(instr->FalseLabel(chunk_)); // NaN -> false. 2188 __ bunordered(instr->FalseLabel(chunk_)); // NaN -> false.
2190 __ beq(instr->FalseLabel(chunk_)); // +0, -0 -> false. 2189 __ beq(instr->FalseLabel(chunk_)); // +0, -0 -> false.
2191 __ b(instr->TrueLabel(chunk_)); 2190 __ b(instr->TrueLabel(chunk_));
2192 __ bind(&not_heap_number); 2191 __ bind(&not_heap_number);
2193 } 2192 }
2194 2193
2195 if (!expected.IsGeneric()) { 2194 if (!expected.IsGeneric()) {
2196 // We've seen something for the first time -> deopt. 2195 // We've seen something for the first time -> deopt.
2197 // This can only happen if we are not generic already. 2196 // This can only happen if we are not generic already.
2198 DeoptimizeIf(al, instr, Deoptimizer::kUnexpectedObject); 2197 DeoptimizeIf(al, instr, DeoptimizeReason::kUnexpectedObject);
2199 } 2198 }
2200 } 2199 }
2201 } 2200 }
2202 } 2201 }
2203 2202
2204 void LCodeGen::EmitGoto(int block) { 2203 void LCodeGen::EmitGoto(int block) {
2205 if (!IsNextEmittedBlock(block)) { 2204 if (!IsNextEmittedBlock(block)) {
2206 __ b(chunk_->GetAssemblyLabel(LookupDestination(block))); 2205 __ b(chunk_->GetAssemblyLabel(LookupDestination(block)));
2207 } 2206 }
2208 } 2207 }
(...skipping 342 matching lines...) Expand 10 before | Expand all | Expand 10 after
2551 } 2550 }
2552 // Loop through the {object}s prototype chain looking for the {prototype}. 2551 // Loop through the {object}s prototype chain looking for the {prototype}.
2553 __ LoadP(object_map, FieldMemOperand(object, HeapObject::kMapOffset)); 2552 __ LoadP(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
2554 Label loop; 2553 Label loop;
2555 __ bind(&loop); 2554 __ bind(&loop);
2556 2555
2557 // Deoptimize if the object needs to be access checked. 2556 // Deoptimize if the object needs to be access checked.
2558 __ LoadlB(object_instance_type, 2557 __ LoadlB(object_instance_type,
2559 FieldMemOperand(object_map, Map::kBitFieldOffset)); 2558 FieldMemOperand(object_map, Map::kBitFieldOffset));
2560 __ TestBit(object_instance_type, Map::kIsAccessCheckNeeded, r0); 2559 __ TestBit(object_instance_type, Map::kIsAccessCheckNeeded, r0);
2561 DeoptimizeIf(ne, instr, Deoptimizer::kAccessCheck, cr0); 2560 DeoptimizeIf(ne, instr, DeoptimizeReason::kAccessCheck, cr0);
2562 // Deoptimize for proxies. 2561 // Deoptimize for proxies.
2563 __ CompareInstanceType(object_map, object_instance_type, JS_PROXY_TYPE); 2562 __ CompareInstanceType(object_map, object_instance_type, JS_PROXY_TYPE);
2564 DeoptimizeIf(eq, instr, Deoptimizer::kProxy); 2563 DeoptimizeIf(eq, instr, DeoptimizeReason::kProxy);
2565 __ LoadP(object_prototype, 2564 __ LoadP(object_prototype,
2566 FieldMemOperand(object_map, Map::kPrototypeOffset)); 2565 FieldMemOperand(object_map, Map::kPrototypeOffset));
2567 __ CompareRoot(object_prototype, Heap::kNullValueRootIndex); 2566 __ CompareRoot(object_prototype, Heap::kNullValueRootIndex);
2568 EmitFalseBranch(instr, eq); 2567 EmitFalseBranch(instr, eq);
2569 __ CmpP(object_prototype, prototype); 2568 __ CmpP(object_prototype, prototype);
2570 EmitTrueBranch(instr, eq); 2569 EmitTrueBranch(instr, eq);
2571 __ LoadP(object_map, 2570 __ LoadP(object_map,
2572 FieldMemOperand(object_prototype, HeapObject::kMapOffset)); 2571 FieldMemOperand(object_prototype, HeapObject::kMapOffset));
2573 __ b(&loop); 2572 __ b(&loop);
2574 } 2573 }
(...skipping 100 matching lines...) Expand 10 before | Expand all | Expand 10 after
2675 CallCode(ic, RelocInfo::CODE_TARGET, instr); 2674 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2676 } 2675 }
2677 2676
2678 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) { 2677 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
2679 Register context = ToRegister(instr->context()); 2678 Register context = ToRegister(instr->context());
2680 Register result = ToRegister(instr->result()); 2679 Register result = ToRegister(instr->result());
2681 __ LoadP(result, ContextMemOperand(context, instr->slot_index())); 2680 __ LoadP(result, ContextMemOperand(context, instr->slot_index()));
2682 if (instr->hydrogen()->RequiresHoleCheck()) { 2681 if (instr->hydrogen()->RequiresHoleCheck()) {
2683 __ CompareRoot(result, Heap::kTheHoleValueRootIndex); 2682 __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
2684 if (instr->hydrogen()->DeoptimizesOnHole()) { 2683 if (instr->hydrogen()->DeoptimizesOnHole()) {
2685 DeoptimizeIf(eq, instr, Deoptimizer::kHole); 2684 DeoptimizeIf(eq, instr, DeoptimizeReason::kHole);
2686 } else { 2685 } else {
2687 Label skip; 2686 Label skip;
2688 __ bne(&skip, Label::kNear); 2687 __ bne(&skip, Label::kNear);
2689 __ mov(result, Operand(factory()->undefined_value())); 2688 __ mov(result, Operand(factory()->undefined_value()));
2690 __ bind(&skip); 2689 __ bind(&skip);
2691 } 2690 }
2692 } 2691 }
2693 } 2692 }
2694 2693
2695 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) { 2694 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
2696 Register context = ToRegister(instr->context()); 2695 Register context = ToRegister(instr->context());
2697 Register value = ToRegister(instr->value()); 2696 Register value = ToRegister(instr->value());
2698 Register scratch = scratch0(); 2697 Register scratch = scratch0();
2699 MemOperand target = ContextMemOperand(context, instr->slot_index()); 2698 MemOperand target = ContextMemOperand(context, instr->slot_index());
2700 2699
2701 Label skip_assignment; 2700 Label skip_assignment;
2702 2701
2703 if (instr->hydrogen()->RequiresHoleCheck()) { 2702 if (instr->hydrogen()->RequiresHoleCheck()) {
2704 __ LoadP(scratch, target); 2703 __ LoadP(scratch, target);
2705 __ CompareRoot(scratch, Heap::kTheHoleValueRootIndex); 2704 __ CompareRoot(scratch, Heap::kTheHoleValueRootIndex);
2706 if (instr->hydrogen()->DeoptimizesOnHole()) { 2705 if (instr->hydrogen()->DeoptimizesOnHole()) {
2707 DeoptimizeIf(eq, instr, Deoptimizer::kHole); 2706 DeoptimizeIf(eq, instr, DeoptimizeReason::kHole);
2708 } else { 2707 } else {
2709 __ bne(&skip_assignment); 2708 __ bne(&skip_assignment);
2710 } 2709 }
2711 } 2710 }
2712 2711
2713 __ StoreP(value, target); 2712 __ StoreP(value, target);
2714 if (instr->hydrogen()->NeedsWriteBarrier()) { 2713 if (instr->hydrogen()->NeedsWriteBarrier()) {
2715 SmiCheck check_needed = instr->hydrogen()->value()->type().IsHeapObject() 2714 SmiCheck check_needed = instr->hydrogen()->value()->type().IsHeapObject()
2716 ? OMIT_SMI_CHECK 2715 ? OMIT_SMI_CHECK
2717 : INLINE_SMI_CHECK; 2716 : INLINE_SMI_CHECK;
(...skipping 62 matching lines...) Expand 10 before | Expand all | Expand 10 after
2780 Register scratch = scratch0(); 2779 Register scratch = scratch0();
2781 Register function = ToRegister(instr->function()); 2780 Register function = ToRegister(instr->function());
2782 Register result = ToRegister(instr->result()); 2781 Register result = ToRegister(instr->result());
2783 2782
2784 // Get the prototype or initial map from the function. 2783 // Get the prototype or initial map from the function.
2785 __ LoadP(result, 2784 __ LoadP(result,
2786 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); 2785 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
2787 2786
2788 // Check that the function has a prototype or an initial map. 2787 // Check that the function has a prototype or an initial map.
2789 __ CompareRoot(result, Heap::kTheHoleValueRootIndex); 2788 __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
2790 DeoptimizeIf(eq, instr, Deoptimizer::kHole); 2789 DeoptimizeIf(eq, instr, DeoptimizeReason::kHole);
2791 2790
2792 // If the function does not have an initial map, we're done. 2791 // If the function does not have an initial map, we're done.
2793 Label done; 2792 Label done;
2794 __ CompareObjectType(result, scratch, scratch, MAP_TYPE); 2793 __ CompareObjectType(result, scratch, scratch, MAP_TYPE);
2795 __ bne(&done, Label::kNear); 2794 __ bne(&done, Label::kNear);
2796 2795
2797 // Get the prototype from the initial map. 2796 // Get the prototype from the initial map.
2798 __ LoadP(result, FieldMemOperand(result, Map::kPrototypeOffset)); 2797 __ LoadP(result, FieldMemOperand(result, Map::kPrototypeOffset));
2799 2798
2800 // All done. 2799 // All done.
(...skipping 112 matching lines...) Expand 10 before | Expand all | Expand 10 after
2913 case UINT16_ELEMENTS: 2912 case UINT16_ELEMENTS:
2914 __ LoadLogicalHalfWordP(result, mem_operand); 2913 __ LoadLogicalHalfWordP(result, mem_operand);
2915 break; 2914 break;
2916 case INT32_ELEMENTS: 2915 case INT32_ELEMENTS:
2917 __ LoadW(result, mem_operand, r0); 2916 __ LoadW(result, mem_operand, r0);
2918 break; 2917 break;
2919 case UINT32_ELEMENTS: 2918 case UINT32_ELEMENTS:
2920 __ LoadlW(result, mem_operand, r0); 2919 __ LoadlW(result, mem_operand, r0);
2921 if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) { 2920 if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
2922 __ CmpLogical32(result, Operand(0x80000000)); 2921 __ CmpLogical32(result, Operand(0x80000000));
2923 DeoptimizeIf(ge, instr, Deoptimizer::kNegativeValue); 2922 DeoptimizeIf(ge, instr, DeoptimizeReason::kNegativeValue);
2924 } 2923 }
2925 break; 2924 break;
2926 case FLOAT32_ELEMENTS: 2925 case FLOAT32_ELEMENTS:
2927 case FLOAT64_ELEMENTS: 2926 case FLOAT64_ELEMENTS:
2928 case FAST_HOLEY_DOUBLE_ELEMENTS: 2927 case FAST_HOLEY_DOUBLE_ELEMENTS:
2929 case FAST_HOLEY_ELEMENTS: 2928 case FAST_HOLEY_ELEMENTS:
2930 case FAST_HOLEY_SMI_ELEMENTS: 2929 case FAST_HOLEY_SMI_ELEMENTS:
2931 case FAST_DOUBLE_ELEMENTS: 2930 case FAST_DOUBLE_ELEMENTS:
2932 case FAST_ELEMENTS: 2931 case FAST_ELEMENTS:
2933 case FAST_SMI_ELEMENTS: 2932 case FAST_SMI_ELEMENTS:
(...skipping 58 matching lines...) Expand 10 before | Expand all | Expand 10 after
2992 2991
2993 if (instr->hydrogen()->RequiresHoleCheck()) { 2992 if (instr->hydrogen()->RequiresHoleCheck()) {
2994 if (!use_scratch) { 2993 if (!use_scratch) {
2995 __ LoadlW(r0, 2994 __ LoadlW(r0,
2996 MemOperand(elements, base_offset + Register::kExponentOffset)); 2995 MemOperand(elements, base_offset + Register::kExponentOffset));
2997 } else { 2996 } else {
2998 __ LoadlW(r0, MemOperand(scratch, elements, 2997 __ LoadlW(r0, MemOperand(scratch, elements,
2999 base_offset + Register::kExponentOffset)); 2998 base_offset + Register::kExponentOffset));
3000 } 2999 }
3001 __ Cmp32(r0, Operand(kHoleNanUpper32)); 3000 __ Cmp32(r0, Operand(kHoleNanUpper32));
3002 DeoptimizeIf(eq, instr, Deoptimizer::kHole); 3001 DeoptimizeIf(eq, instr, DeoptimizeReason::kHole);
3003 } 3002 }
3004 } 3003 }
3005 3004
3006 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) { 3005 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
3007 HLoadKeyed* hinstr = instr->hydrogen(); 3006 HLoadKeyed* hinstr = instr->hydrogen();
3008 Register elements = ToRegister(instr->elements()); 3007 Register elements = ToRegister(instr->elements());
3009 Register result = ToRegister(instr->result()); 3008 Register result = ToRegister(instr->result());
3010 Register scratch = scratch0(); 3009 Register scratch = scratch0();
3011 int offset = instr->base_offset(); 3010 int offset = instr->base_offset();
3012 3011
(...skipping 31 matching lines...) Expand 10 before | Expand all | Expand 10 after
3044 r1); 3043 r1);
3045 } else { 3044 } else {
3046 __ LoadRepresentation(result, MemOperand(scratch, elements, offset), 3045 __ LoadRepresentation(result, MemOperand(scratch, elements, offset),
3047 representation, r1); 3046 representation, r1);
3048 } 3047 }
3049 3048
3050 // Check for the hole value. 3049 // Check for the hole value.
3051 if (requires_hole_check) { 3050 if (requires_hole_check) {
3052 if (IsFastSmiElementsKind(hinstr->elements_kind())) { 3051 if (IsFastSmiElementsKind(hinstr->elements_kind())) {
3053 __ TestIfSmi(result); 3052 __ TestIfSmi(result);
3054 DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, cr0); 3053 DeoptimizeIf(ne, instr, DeoptimizeReason::kNotASmi, cr0);
3055 } else { 3054 } else {
3056 __ CompareRoot(result, Heap::kTheHoleValueRootIndex); 3055 __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
3057 DeoptimizeIf(eq, instr, Deoptimizer::kHole); 3056 DeoptimizeIf(eq, instr, DeoptimizeReason::kHole);
3058 } 3057 }
3059 } else if (instr->hydrogen()->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) { 3058 } else if (instr->hydrogen()->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) {
3060 DCHECK(instr->hydrogen()->elements_kind() == FAST_HOLEY_ELEMENTS); 3059 DCHECK(instr->hydrogen()->elements_kind() == FAST_HOLEY_ELEMENTS);
3061 Label done; 3060 Label done;
3062 __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex); 3061 __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
3063 __ CmpP(result, scratch); 3062 __ CmpP(result, scratch);
3064 __ bne(&done); 3063 __ bne(&done);
3065 if (info()->IsStub()) { 3064 if (info()->IsStub()) {
3066 // A stub can safely convert the hole to undefined only if the array 3065 // A stub can safely convert the hole to undefined only if the array
3067 // protector cell contains (Smi) Isolate::kArrayProtectorValid. Otherwise 3066 // protector cell contains (Smi) Isolate::kArrayProtectorValid. Otherwise
3068 // it needs to bail out. 3067 // it needs to bail out.
3069 __ LoadRoot(result, Heap::kArrayProtectorRootIndex); 3068 __ LoadRoot(result, Heap::kArrayProtectorRootIndex);
3070 __ LoadP(result, FieldMemOperand(result, Cell::kValueOffset)); 3069 __ LoadP(result, FieldMemOperand(result, Cell::kValueOffset));
3071 __ CmpSmiLiteral(result, Smi::FromInt(Isolate::kArrayProtectorValid), r0); 3070 __ CmpSmiLiteral(result, Smi::FromInt(Isolate::kArrayProtectorValid), r0);
3072 DeoptimizeIf(ne, instr, Deoptimizer::kHole); 3071 DeoptimizeIf(ne, instr, DeoptimizeReason::kHole);
3073 } 3072 }
3074 __ LoadRoot(result, Heap::kUndefinedValueRootIndex); 3073 __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
3075 __ bind(&done); 3074 __ bind(&done);
3076 } 3075 }
3077 } 3076 }
3078 3077
3079 void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) { 3078 void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
3080 if (instr->is_fixed_typed_array()) { 3079 if (instr->is_fixed_typed_array()) {
3081 DoLoadKeyedExternalArray(instr); 3080 DoLoadKeyedExternalArray(instr);
3082 } else if (instr->hydrogen()->representation().IsDouble()) { 3081 } else if (instr->hydrogen()->representation().IsDouble()) {
(...skipping 123 matching lines...) Expand 10 before | Expand all | Expand 10 after
3206 } 3205 }
3207 3206
3208 // Normal function. Replace undefined or null with global receiver. 3207 // Normal function. Replace undefined or null with global receiver.
3209 __ CompareRoot(receiver, Heap::kNullValueRootIndex); 3208 __ CompareRoot(receiver, Heap::kNullValueRootIndex);
3210 __ beq(&global_object, Label::kNear); 3209 __ beq(&global_object, Label::kNear);
3211 __ CompareRoot(receiver, Heap::kUndefinedValueRootIndex); 3210 __ CompareRoot(receiver, Heap::kUndefinedValueRootIndex);
3212 __ beq(&global_object, Label::kNear); 3211 __ beq(&global_object, Label::kNear);
3213 3212
3214 // Deoptimize if the receiver is not a JS object. 3213 // Deoptimize if the receiver is not a JS object.
3215 __ TestIfSmi(receiver); 3214 __ TestIfSmi(receiver);
3216 DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0); 3215 DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, cr0);
3217 __ CompareObjectType(receiver, scratch, scratch, FIRST_JS_RECEIVER_TYPE); 3216 __ CompareObjectType(receiver, scratch, scratch, FIRST_JS_RECEIVER_TYPE);
3218 DeoptimizeIf(lt, instr, Deoptimizer::kNotAJavaScriptObject); 3217 DeoptimizeIf(lt, instr, DeoptimizeReason::kNotAJavaScriptObject);
3219 3218
3220 __ b(&result_in_receiver, Label::kNear); 3219 __ b(&result_in_receiver, Label::kNear);
3221 __ bind(&global_object); 3220 __ bind(&global_object);
3222 __ LoadP(result, FieldMemOperand(function, JSFunction::kContextOffset)); 3221 __ LoadP(result, FieldMemOperand(function, JSFunction::kContextOffset));
3223 __ LoadP(result, ContextMemOperand(result, Context::NATIVE_CONTEXT_INDEX)); 3222 __ LoadP(result, ContextMemOperand(result, Context::NATIVE_CONTEXT_INDEX));
3224 __ LoadP(result, ContextMemOperand(result, Context::GLOBAL_PROXY_INDEX)); 3223 __ LoadP(result, ContextMemOperand(result, Context::GLOBAL_PROXY_INDEX));
3225 3224
3226 if (result.is(receiver)) { 3225 if (result.is(receiver)) {
3227 __ bind(&result_in_receiver); 3226 __ bind(&result_in_receiver);
3228 } else { 3227 } else {
(...skipping 12 matching lines...) Expand all
3241 Register elements = ToRegister(instr->elements()); 3240 Register elements = ToRegister(instr->elements());
3242 Register scratch = scratch0(); 3241 Register scratch = scratch0();
3243 DCHECK(receiver.is(r2)); // Used for parameter count. 3242 DCHECK(receiver.is(r2)); // Used for parameter count.
3244 DCHECK(function.is(r3)); // Required by InvokeFunction. 3243 DCHECK(function.is(r3)); // Required by InvokeFunction.
3245 DCHECK(ToRegister(instr->result()).is(r2)); 3244 DCHECK(ToRegister(instr->result()).is(r2));
3246 3245
3247 // Copy the arguments to this function possibly from the 3246 // Copy the arguments to this function possibly from the
3248 // adaptor frame below it. 3247 // adaptor frame below it.
3249 const uint32_t kArgumentsLimit = 1 * KB; 3248 const uint32_t kArgumentsLimit = 1 * KB;
3250 __ CmpLogicalP(length, Operand(kArgumentsLimit)); 3249 __ CmpLogicalP(length, Operand(kArgumentsLimit));
3251 DeoptimizeIf(gt, instr, Deoptimizer::kTooManyArguments); 3250 DeoptimizeIf(gt, instr, DeoptimizeReason::kTooManyArguments);
3252 3251
3253 // Push the receiver and use the register to keep the original 3252 // Push the receiver and use the register to keep the original
3254 // number of arguments. 3253 // number of arguments.
3255 __ push(receiver); 3254 __ push(receiver);
3256 __ LoadRR(receiver, length); 3255 __ LoadRR(receiver, length);
3257 // The arguments are at a one pointer size offset from elements. 3256 // The arguments are at a one pointer size offset from elements.
3258 __ AddP(elements, Operand(1 * kPointerSize)); 3257 __ AddP(elements, Operand(1 * kPointerSize));
3259 3258
3260 // Loop through the arguments pushing them onto the execution 3259 // Loop through the arguments pushing them onto the execution
3261 // stack. 3260 // stack.
(...skipping 124 matching lines...) Expand 10 before | Expand all | Expand 10 after
3386 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) { 3385 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
3387 DCHECK(instr->context() != NULL); 3386 DCHECK(instr->context() != NULL);
3388 DCHECK(ToRegister(instr->context()).is(cp)); 3387 DCHECK(ToRegister(instr->context()).is(cp));
3389 Register input = ToRegister(instr->value()); 3388 Register input = ToRegister(instr->value());
3390 Register result = ToRegister(instr->result()); 3389 Register result = ToRegister(instr->result());
3391 Register scratch = scratch0(); 3390 Register scratch = scratch0();
3392 3391
3393 // Deoptimize if not a heap number. 3392 // Deoptimize if not a heap number.
3394 __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); 3393 __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
3395 __ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex); 3394 __ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
3396 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber); 3395 DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber);
3397 3396
3398 Label done; 3397 Label done;
3399 Register exponent = scratch0(); 3398 Register exponent = scratch0();
3400 scratch = no_reg; 3399 scratch = no_reg;
3401 __ LoadlW(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset)); 3400 __ LoadlW(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
3402 // Check the sign of the argument. If the argument is positive, just 3401 // Check the sign of the argument. If the argument is positive, just
3403 // return it. 3402 // return it.
3404 __ Cmp32(exponent, Operand::Zero()); 3403 __ Cmp32(exponent, Operand::Zero());
3405 // Move the input to the result if necessary. 3404 // Move the input to the result if necessary.
3406 __ Move(result, input); 3405 __ Move(result, input);
(...skipping 47 matching lines...) Expand 10 before | Expand all | Expand 10 after
3454 3453
3455 void LCodeGen::EmitMathAbs(LMathAbs* instr) { 3454 void LCodeGen::EmitMathAbs(LMathAbs* instr) {
3456 Register input = ToRegister(instr->value()); 3455 Register input = ToRegister(instr->value());
3457 Register result = ToRegister(instr->result()); 3456 Register result = ToRegister(instr->result());
3458 Label done; 3457 Label done;
3459 __ CmpP(input, Operand::Zero()); 3458 __ CmpP(input, Operand::Zero());
3460 __ Move(result, input); 3459 __ Move(result, input);
3461 __ bge(&done, Label::kNear); 3460 __ bge(&done, Label::kNear);
3462 __ LoadComplementRR(result, result); 3461 __ LoadComplementRR(result, result);
3463 // Deoptimize on overflow. 3462 // Deoptimize on overflow.
3464 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow, cr0); 3463 DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow, cr0);
3465 __ bind(&done); 3464 __ bind(&done);
3466 } 3465 }
3467 3466
3468 #if V8_TARGET_ARCH_S390X 3467 #if V8_TARGET_ARCH_S390X
3469 void LCodeGen::EmitInteger32MathAbs(LMathAbs* instr) { 3468 void LCodeGen::EmitInteger32MathAbs(LMathAbs* instr) {
3470 Register input = ToRegister(instr->value()); 3469 Register input = ToRegister(instr->value());
3471 Register result = ToRegister(instr->result()); 3470 Register result = ToRegister(instr->result());
3472 Label done; 3471 Label done;
3473 __ Cmp32(input, Operand::Zero()); 3472 __ Cmp32(input, Operand::Zero());
3474 __ Move(result, input); 3473 __ Move(result, input);
3475 __ bge(&done, Label::kNear); 3474 __ bge(&done, Label::kNear);
3476 3475
3477 // Deoptimize on overflow. 3476 // Deoptimize on overflow.
3478 __ Cmp32(input, Operand(0x80000000)); 3477 __ Cmp32(input, Operand(0x80000000));
3479 DeoptimizeIf(eq, instr, Deoptimizer::kOverflow); 3478 DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow);
3480 3479
3481 __ LoadComplementRR(result, result); 3480 __ LoadComplementRR(result, result);
3482 __ bind(&done); 3481 __ bind(&done);
3483 } 3482 }
3484 #endif 3483 #endif
3485 3484
3486 void LCodeGen::DoMathAbs(LMathAbs* instr) { 3485 void LCodeGen::DoMathAbs(LMathAbs* instr) {
3487 // Class for deferred case. 3486 // Class for deferred case.
3488 class DeferredMathAbsTaggedHeapNumber final : public LDeferredCode { 3487 class DeferredMathAbsTaggedHeapNumber final : public LDeferredCode {
3489 public: 3488 public:
(...skipping 36 matching lines...) Expand 10 before | Expand all | Expand 10 after
3526 3525
3527 void LCodeGen::DoMathFloor(LMathFloor* instr) { 3526 void LCodeGen::DoMathFloor(LMathFloor* instr) {
3528 DoubleRegister input = ToDoubleRegister(instr->value()); 3527 DoubleRegister input = ToDoubleRegister(instr->value());
3529 Register result = ToRegister(instr->result()); 3528 Register result = ToRegister(instr->result());
3530 Register input_high = scratch0(); 3529 Register input_high = scratch0();
3531 Register scratch = ip; 3530 Register scratch = ip;
3532 Label done, exact; 3531 Label done, exact;
3533 3532
3534 __ TryInt32Floor(result, input, input_high, scratch, double_scratch0(), &done, 3533 __ TryInt32Floor(result, input, input_high, scratch, double_scratch0(), &done,
3535 &exact); 3534 &exact);
3536 DeoptimizeIf(al, instr, Deoptimizer::kLostPrecisionOrNaN); 3535 DeoptimizeIf(al, instr, DeoptimizeReason::kLostPrecisionOrNaN);
3537 3536
3538 __ bind(&exact); 3537 __ bind(&exact);
3539 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 3538 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3540 // Test for -0. 3539 // Test for -0.
3541 __ CmpP(result, Operand::Zero()); 3540 __ CmpP(result, Operand::Zero());
3542 __ bne(&done, Label::kNear); 3541 __ bne(&done, Label::kNear);
3543 __ Cmp32(input_high, Operand::Zero()); 3542 __ Cmp32(input_high, Operand::Zero());
3544 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero); 3543 DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
3545 } 3544 }
3546 __ bind(&done); 3545 __ bind(&done);
3547 } 3546 }
3548 3547
3549 void LCodeGen::DoMathRound(LMathRound* instr) { 3548 void LCodeGen::DoMathRound(LMathRound* instr) {
3550 DoubleRegister input = ToDoubleRegister(instr->value()); 3549 DoubleRegister input = ToDoubleRegister(instr->value());
3551 Register result = ToRegister(instr->result()); 3550 Register result = ToRegister(instr->result());
3552 DoubleRegister double_scratch1 = ToDoubleRegister(instr->temp()); 3551 DoubleRegister double_scratch1 = ToDoubleRegister(instr->temp());
3553 DoubleRegister input_plus_dot_five = double_scratch1; 3552 DoubleRegister input_plus_dot_five = double_scratch1;
3554 Register scratch1 = scratch0(); 3553 Register scratch1 = scratch0();
3555 Register scratch2 = ip; 3554 Register scratch2 = ip;
3556 DoubleRegister dot_five = double_scratch0(); 3555 DoubleRegister dot_five = double_scratch0();
3557 Label convert, done; 3556 Label convert, done;
3558 3557
3559 __ LoadDoubleLiteral(dot_five, 0.5, r0); 3558 __ LoadDoubleLiteral(dot_five, 0.5, r0);
3560 __ lpdbr(double_scratch1, input); 3559 __ lpdbr(double_scratch1, input);
3561 __ cdbr(double_scratch1, dot_five); 3560 __ cdbr(double_scratch1, dot_five);
3562 DeoptimizeIf(unordered, instr, Deoptimizer::kLostPrecisionOrNaN); 3561 DeoptimizeIf(unordered, instr, DeoptimizeReason::kLostPrecisionOrNaN);
3563 // If input is in [-0.5, -0], the result is -0. 3562 // If input is in [-0.5, -0], the result is -0.
3564 // If input is in [+0, +0.5[, the result is +0. 3563 // If input is in [+0, +0.5[, the result is +0.
3565 // If the input is +0.5, the result is 1. 3564 // If the input is +0.5, the result is 1.
3566 __ bgt(&convert, Label::kNear); // Out of [-0.5, +0.5]. 3565 __ bgt(&convert, Label::kNear); // Out of [-0.5, +0.5].
3567 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 3566 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3568 // [-0.5, -0] (negative) yields minus zero. 3567 // [-0.5, -0] (negative) yields minus zero.
3569 __ TestDoubleSign(input, scratch1); 3568 __ TestDoubleSign(input, scratch1);
3570 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero); 3569 DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
3571 } 3570 }
3572 Label return_zero; 3571 Label return_zero;
3573 __ cdbr(input, dot_five); 3572 __ cdbr(input, dot_five);
3574 __ bne(&return_zero, Label::kNear); 3573 __ bne(&return_zero, Label::kNear);
3575 __ LoadImmP(result, Operand(1)); // +0.5. 3574 __ LoadImmP(result, Operand(1)); // +0.5.
3576 __ b(&done, Label::kNear); 3575 __ b(&done, Label::kNear);
3577 // Remaining cases: [+0, +0.5[ or [-0.5, +0.5[, depending on 3576 // Remaining cases: [+0, +0.5[ or [-0.5, +0.5[, depending on
3578 // flag kBailoutOnMinusZero. 3577 // flag kBailoutOnMinusZero.
3579 __ bind(&return_zero); 3578 __ bind(&return_zero);
3580 __ LoadImmP(result, Operand::Zero()); 3579 __ LoadImmP(result, Operand::Zero());
3581 __ b(&done, Label::kNear); 3580 __ b(&done, Label::kNear);
3582 3581
3583 __ bind(&convert); 3582 __ bind(&convert);
3584 __ ldr(input_plus_dot_five, input); 3583 __ ldr(input_plus_dot_five, input);
3585 __ adbr(input_plus_dot_five, dot_five); 3584 __ adbr(input_plus_dot_five, dot_five);
3586 // Reuse dot_five (double_scratch0) as we no longer need this value. 3585 // Reuse dot_five (double_scratch0) as we no longer need this value.
3587 __ TryInt32Floor(result, input_plus_dot_five, scratch1, scratch2, 3586 __ TryInt32Floor(result, input_plus_dot_five, scratch1, scratch2,
3588 double_scratch0(), &done, &done); 3587 double_scratch0(), &done, &done);
3589 DeoptimizeIf(al, instr, Deoptimizer::kLostPrecisionOrNaN); 3588 DeoptimizeIf(al, instr, DeoptimizeReason::kLostPrecisionOrNaN);
3590 __ bind(&done); 3589 __ bind(&done);
3591 } 3590 }
3592 3591
3593 void LCodeGen::DoMathFround(LMathFround* instr) { 3592 void LCodeGen::DoMathFround(LMathFround* instr) {
3594 DoubleRegister input_reg = ToDoubleRegister(instr->value()); 3593 DoubleRegister input_reg = ToDoubleRegister(instr->value());
3595 DoubleRegister output_reg = ToDoubleRegister(instr->result()); 3594 DoubleRegister output_reg = ToDoubleRegister(instr->result());
3596 3595
3597 // Round double to float 3596 // Round double to float
3598 __ ledbr(output_reg, input_reg); 3597 __ ledbr(output_reg, input_reg);
3599 // Extend from float to double 3598 // Extend from float to double
(...skipping 44 matching lines...) Expand 10 before | Expand all | Expand 10 after
3644 DCHECK(ToDoubleRegister(instr->result()).is(d3)); 3643 DCHECK(ToDoubleRegister(instr->result()).is(d3));
3645 3644
3646 if (exponent_type.IsSmi()) { 3645 if (exponent_type.IsSmi()) {
3647 MathPowStub stub(isolate(), MathPowStub::TAGGED); 3646 MathPowStub stub(isolate(), MathPowStub::TAGGED);
3648 __ CallStub(&stub); 3647 __ CallStub(&stub);
3649 } else if (exponent_type.IsTagged()) { 3648 } else if (exponent_type.IsTagged()) {
3650 Label no_deopt; 3649 Label no_deopt;
3651 __ JumpIfSmi(tagged_exponent, &no_deopt); 3650 __ JumpIfSmi(tagged_exponent, &no_deopt);
3652 __ LoadP(r9, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset)); 3651 __ LoadP(r9, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset));
3653 __ CompareRoot(r9, Heap::kHeapNumberMapRootIndex); 3652 __ CompareRoot(r9, Heap::kHeapNumberMapRootIndex);
3654 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber); 3653 DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber);
3655 __ bind(&no_deopt); 3654 __ bind(&no_deopt);
3656 MathPowStub stub(isolate(), MathPowStub::TAGGED); 3655 MathPowStub stub(isolate(), MathPowStub::TAGGED);
3657 __ CallStub(&stub); 3656 __ CallStub(&stub);
3658 } else if (exponent_type.IsInteger32()) { 3657 } else if (exponent_type.IsInteger32()) {
3659 MathPowStub stub(isolate(), MathPowStub::INTEGER); 3658 MathPowStub stub(isolate(), MathPowStub::INTEGER);
3660 __ CallStub(&stub); 3659 __ CallStub(&stub);
3661 } else { 3660 } else {
3662 DCHECK(exponent_type.IsDouble()); 3661 DCHECK(exponent_type.IsDouble());
3663 MathPowStub stub(isolate(), MathPowStub::DOUBLE); 3662 MathPowStub stub(isolate(), MathPowStub::DOUBLE);
3664 __ CallStub(&stub); 3663 __ CallStub(&stub);
(...skipping 363 matching lines...) Expand 10 before | Expand all | Expand 10 after
4028 } else { 4027 } else {
4029 __ CmpLogical32(length, index); 4028 __ CmpLogical32(length, index);
4030 } 4029 }
4031 } 4030 }
4032 if (FLAG_debug_code && instr->hydrogen()->skip_check()) { 4031 if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
4033 Label done; 4032 Label done;
4034 __ b(NegateCondition(cc), &done, Label::kNear); 4033 __ b(NegateCondition(cc), &done, Label::kNear);
4035 __ stop("eliminated bounds check failed"); 4034 __ stop("eliminated bounds check failed");
4036 __ bind(&done); 4035 __ bind(&done);
4037 } else { 4036 } else {
4038 DeoptimizeIf(cc, instr, Deoptimizer::kOutOfBounds); 4037 DeoptimizeIf(cc, instr, DeoptimizeReason::kOutOfBounds);
4039 } 4038 }
4040 } 4039 }
4041 4040
4042 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) { 4041 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
4043 Register external_pointer = ToRegister(instr->elements()); 4042 Register external_pointer = ToRegister(instr->elements());
4044 Register key = no_reg; 4043 Register key = no_reg;
4045 ElementsKind elements_kind = instr->elements_kind(); 4044 ElementsKind elements_kind = instr->elements_kind();
4046 bool key_is_constant = instr->key()->IsConstantOperand(); 4045 bool key_is_constant = instr->key()->IsConstantOperand();
4047 int constant_key = 0; 4046 int constant_key = 0;
4048 if (key_is_constant) { 4047 if (key_is_constant) {
(...skipping 339 matching lines...) Expand 10 before | Expand all | Expand 10 after
4388 GrowArrayElementsStub stub(isolate(), instr->hydrogen()->is_js_array(), 4387 GrowArrayElementsStub stub(isolate(), instr->hydrogen()->is_js_array(),
4389 instr->hydrogen()->kind()); 4388 instr->hydrogen()->kind());
4390 __ CallStub(&stub); 4389 __ CallStub(&stub);
4391 RecordSafepointWithLazyDeopt( 4390 RecordSafepointWithLazyDeopt(
4392 instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); 4391 instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
4393 __ StoreToSafepointRegisterSlot(result, result); 4392 __ StoreToSafepointRegisterSlot(result, result);
4394 } 4393 }
4395 4394
4396 // Deopt on smi, which means the elements array changed to dictionary mode. 4395 // Deopt on smi, which means the elements array changed to dictionary mode.
4397 __ TestIfSmi(result); 4396 __ TestIfSmi(result);
4398 DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0); 4397 DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, cr0);
4399 } 4398 }
4400 4399
4401 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) { 4400 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
4402 Register object_reg = ToRegister(instr->object()); 4401 Register object_reg = ToRegister(instr->object());
4403 Register scratch = scratch0(); 4402 Register scratch = scratch0();
4404 4403
4405 Handle<Map> from_map = instr->original_map(); 4404 Handle<Map> from_map = instr->original_map();
4406 Handle<Map> to_map = instr->transitioned_map(); 4405 Handle<Map> to_map = instr->transitioned_map();
4407 ElementsKind from_kind = instr->from_kind(); 4406 ElementsKind from_kind = instr->from_kind();
4408 ElementsKind to_kind = instr->to_kind(); 4407 ElementsKind to_kind = instr->to_kind();
(...skipping 22 matching lines...) Expand all
4431 } 4430 }
4432 __ bind(&not_applicable); 4431 __ bind(&not_applicable);
4433 } 4432 }
4434 4433
4435 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) { 4434 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
4436 Register object = ToRegister(instr->object()); 4435 Register object = ToRegister(instr->object());
4437 Register temp1 = ToRegister(instr->temp1()); 4436 Register temp1 = ToRegister(instr->temp1());
4438 Register temp2 = ToRegister(instr->temp2()); 4437 Register temp2 = ToRegister(instr->temp2());
4439 Label no_memento_found; 4438 Label no_memento_found;
4440 __ TestJSArrayForAllocationMemento(object, temp1, temp2, &no_memento_found); 4439 __ TestJSArrayForAllocationMemento(object, temp1, temp2, &no_memento_found);
4441 DeoptimizeIf(eq, instr, Deoptimizer::kMementoFound); 4440 DeoptimizeIf(eq, instr, DeoptimizeReason::kMementoFound);
4442 __ bind(&no_memento_found); 4441 __ bind(&no_memento_found);
4443 } 4442 }
4444 4443
4445 void LCodeGen::DoStringAdd(LStringAdd* instr) { 4444 void LCodeGen::DoStringAdd(LStringAdd* instr) {
4446 DCHECK(ToRegister(instr->context()).is(cp)); 4445 DCHECK(ToRegister(instr->context()).is(cp));
4447 DCHECK(ToRegister(instr->left()).is(r3)); 4446 DCHECK(ToRegister(instr->left()).is(r3));
4448 DCHECK(ToRegister(instr->right()).is(r2)); 4447 DCHECK(ToRegister(instr->right()).is(r2));
4449 StringAddStub stub(isolate(), instr->hydrogen()->flags(), 4448 StringAddStub stub(isolate(), instr->hydrogen()->flags(),
4450 instr->hydrogen()->pretenure_flag()); 4449 instr->hydrogen()->pretenure_flag());
4451 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); 4450 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
(...skipping 277 matching lines...) Expand 10 before | Expand all | Expand 10 after
4729 __ StoreToSafepointRegisterSlot(r2, reg); 4728 __ StoreToSafepointRegisterSlot(r2, reg);
4730 } 4729 }
4731 4730
4732 void LCodeGen::DoSmiTag(LSmiTag* instr) { 4731 void LCodeGen::DoSmiTag(LSmiTag* instr) {
4733 HChange* hchange = instr->hydrogen(); 4732 HChange* hchange = instr->hydrogen();
4734 Register input = ToRegister(instr->value()); 4733 Register input = ToRegister(instr->value());
4735 Register output = ToRegister(instr->result()); 4734 Register output = ToRegister(instr->result());
4736 if (hchange->CheckFlag(HValue::kCanOverflow) && 4735 if (hchange->CheckFlag(HValue::kCanOverflow) &&
4737 hchange->value()->CheckFlag(HValue::kUint32)) { 4736 hchange->value()->CheckFlag(HValue::kUint32)) {
4738 __ TestUnsignedSmiCandidate(input, r0); 4737 __ TestUnsignedSmiCandidate(input, r0);
4739 DeoptimizeIf(ne, instr, Deoptimizer::kOverflow, cr0); 4738 DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow, cr0);
4740 } 4739 }
4741 #if !V8_TARGET_ARCH_S390X 4740 #if !V8_TARGET_ARCH_S390X
4742 if (hchange->CheckFlag(HValue::kCanOverflow) && 4741 if (hchange->CheckFlag(HValue::kCanOverflow) &&
4743 !hchange->value()->CheckFlag(HValue::kUint32)) { 4742 !hchange->value()->CheckFlag(HValue::kUint32)) {
4744 __ SmiTagCheckOverflow(output, input, r0); 4743 __ SmiTagCheckOverflow(output, input, r0);
4745 DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0); 4744 DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow, cr0);
4746 } else { 4745 } else {
4747 #endif 4746 #endif
4748 __ SmiTag(output, input); 4747 __ SmiTag(output, input);
4749 #if !V8_TARGET_ARCH_S390X 4748 #if !V8_TARGET_ARCH_S390X
4750 } 4749 }
4751 #endif 4750 #endif
4752 } 4751 }
4753 4752
4754 void LCodeGen::DoSmiUntag(LSmiUntag* instr) { 4753 void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
4755 Register input = ToRegister(instr->value()); 4754 Register input = ToRegister(instr->value());
4756 Register result = ToRegister(instr->result()); 4755 Register result = ToRegister(instr->result());
4757 if (instr->needs_check()) { 4756 if (instr->needs_check()) {
4758 __ tmll(input, Operand(kHeapObjectTag)); 4757 __ tmll(input, Operand(kHeapObjectTag));
4759 DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, cr0); 4758 DeoptimizeIf(ne, instr, DeoptimizeReason::kNotASmi, cr0);
4760 __ SmiUntag(result, input); 4759 __ SmiUntag(result, input);
4761 } else { 4760 } else {
4762 __ SmiUntag(result, input); 4761 __ SmiUntag(result, input);
4763 } 4762 }
4764 } 4763 }
4765 4764
4766 void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg, 4765 void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
4767 DoubleRegister result_reg, 4766 DoubleRegister result_reg,
4768 NumberUntagDMode mode) { 4767 NumberUntagDMode mode) {
4769 bool can_convert_undefined_to_nan = 4768 bool can_convert_undefined_to_nan =
4770 instr->hydrogen()->can_convert_undefined_to_nan(); 4769 instr->hydrogen()->can_convert_undefined_to_nan();
4771 bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero(); 4770 bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();
4772 4771
4773 Register scratch = scratch0(); 4772 Register scratch = scratch0();
4774 DCHECK(!result_reg.is(double_scratch0())); 4773 DCHECK(!result_reg.is(double_scratch0()));
4775 4774
4776 Label convert, load_smi, done; 4775 Label convert, load_smi, done;
4777 4776
4778 if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) { 4777 if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
4779 // Smi check. 4778 // Smi check.
4780 __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi); 4779 __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
4781 4780
4782 // Heap number map check. 4781 // Heap number map check.
4783 __ LoadP(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); 4782 __ LoadP(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
4784 __ CmpP(scratch, RootMemOperand(Heap::kHeapNumberMapRootIndex)); 4783 __ CmpP(scratch, RootMemOperand(Heap::kHeapNumberMapRootIndex));
4785 4784
4786 if (can_convert_undefined_to_nan) { 4785 if (can_convert_undefined_to_nan) {
4787 __ bne(&convert, Label::kNear); 4786 __ bne(&convert, Label::kNear);
4788 } else { 4787 } else {
4789 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber); 4788 DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber);
4790 } 4789 }
4791 // load heap number 4790 // load heap number
4792 __ ld(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset)); 4791 __ ld(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
4793 if (deoptimize_on_minus_zero) { 4792 if (deoptimize_on_minus_zero) {
4794 __ TestDoubleIsMinusZero(result_reg, scratch, ip); 4793 __ TestDoubleIsMinusZero(result_reg, scratch, ip);
4795 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero); 4794 DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
4796 } 4795 }
4797 __ b(&done, Label::kNear); 4796 __ b(&done, Label::kNear);
4798 if (can_convert_undefined_to_nan) { 4797 if (can_convert_undefined_to_nan) {
4799 __ bind(&convert); 4798 __ bind(&convert);
4800 // Convert undefined (and hole) to NaN. 4799 // Convert undefined (and hole) to NaN.
4801 __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex); 4800 __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
4802 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined); 4801 DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumberUndefined);
4803 __ LoadRoot(scratch, Heap::kNanValueRootIndex); 4802 __ LoadRoot(scratch, Heap::kNanValueRootIndex);
4804 __ ld(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset)); 4803 __ ld(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset));
4805 __ b(&done, Label::kNear); 4804 __ b(&done, Label::kNear);
4806 } 4805 }
4807 } else { 4806 } else {
4808 __ SmiUntag(scratch, input_reg); 4807 __ SmiUntag(scratch, input_reg);
4809 DCHECK(mode == NUMBER_CANDIDATE_IS_SMI); 4808 DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
4810 } 4809 }
4811 // Smi to double register conversion 4810 // Smi to double register conversion
4812 __ bind(&load_smi); 4811 __ bind(&load_smi);
(...skipping 36 matching lines...) Expand 10 before | Expand all | Expand 10 after
4849 __ b(&done, Label::kNear); 4848 __ b(&done, Label::kNear);
4850 4849
4851 __ bind(&check_bools); 4850 __ bind(&check_bools);
4852 __ CompareRoot(input_reg, Heap::kTrueValueRootIndex); 4851 __ CompareRoot(input_reg, Heap::kTrueValueRootIndex);
4853 __ bne(&check_false, Label::kNear); 4852 __ bne(&check_false, Label::kNear);
4854 __ LoadImmP(input_reg, Operand(1)); 4853 __ LoadImmP(input_reg, Operand(1));
4855 __ b(&done, Label::kNear); 4854 __ b(&done, Label::kNear);
4856 4855
4857 __ bind(&check_false); 4856 __ bind(&check_false);
4858 __ CompareRoot(input_reg, Heap::kFalseValueRootIndex); 4857 __ CompareRoot(input_reg, Heap::kFalseValueRootIndex);
4859 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefinedBoolean); 4858 DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumberUndefinedBoolean);
4860 __ LoadImmP(input_reg, Operand::Zero()); 4859 __ LoadImmP(input_reg, Operand::Zero());
4861 } else { 4860 } else {
4862 // Deoptimize if we don't have a heap number. 4861 // Deoptimize if we don't have a heap number.
4863 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber); 4862 DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber);
4864 4863
4865 __ ld(double_scratch2, 4864 __ ld(double_scratch2,
4866 FieldMemOperand(input_reg, HeapNumber::kValueOffset)); 4865 FieldMemOperand(input_reg, HeapNumber::kValueOffset));
4867 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 4866 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
4868 // preserve heap number pointer in scratch2 for minus zero check below 4867 // preserve heap number pointer in scratch2 for minus zero check below
4869 __ LoadRR(scratch2, input_reg); 4868 __ LoadRR(scratch2, input_reg);
4870 } 4869 }
4871 __ TryDoubleToInt32Exact(input_reg, double_scratch2, scratch1, 4870 __ TryDoubleToInt32Exact(input_reg, double_scratch2, scratch1,
4872 double_scratch); 4871 double_scratch);
4873 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN); 4872 DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN);
4874 4873
4875 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 4874 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
4876 __ CmpP(input_reg, Operand::Zero()); 4875 __ CmpP(input_reg, Operand::Zero());
4877 __ bne(&done, Label::kNear); 4876 __ bne(&done, Label::kNear);
4878 __ TestHeapNumberSign(scratch2, scratch1); 4877 __ TestHeapNumberSign(scratch2, scratch1);
4879 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero); 4878 DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
4880 } 4879 }
4881 } 4880 }
4882 __ bind(&done); 4881 __ bind(&done);
4883 } 4882 }
4884 4883
4885 void LCodeGen::DoTaggedToI(LTaggedToI* instr) { 4884 void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
4886 class DeferredTaggedToI final : public LDeferredCode { 4885 class DeferredTaggedToI final : public LDeferredCode {
4887 public: 4886 public:
4888 DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr) 4887 DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
4889 : LDeferredCode(codegen), instr_(instr) {} 4888 : LDeferredCode(codegen), instr_(instr) {}
(...skipping 45 matching lines...) Expand 10 before | Expand all | Expand 10 after
4935 Register scratch1 = scratch0(); 4934 Register scratch1 = scratch0();
4936 DoubleRegister double_input = ToDoubleRegister(instr->value()); 4935 DoubleRegister double_input = ToDoubleRegister(instr->value());
4937 DoubleRegister double_scratch = double_scratch0(); 4936 DoubleRegister double_scratch = double_scratch0();
4938 4937
4939 if (instr->truncating()) { 4938 if (instr->truncating()) {
4940 __ TruncateDoubleToI(result_reg, double_input); 4939 __ TruncateDoubleToI(result_reg, double_input);
4941 } else { 4940 } else {
4942 __ TryDoubleToInt32Exact(result_reg, double_input, scratch1, 4941 __ TryDoubleToInt32Exact(result_reg, double_input, scratch1,
4943 double_scratch); 4942 double_scratch);
4944 // Deoptimize if the input wasn't a int32 (inside a double). 4943 // Deoptimize if the input wasn't a int32 (inside a double).
4945 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN); 4944 DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN);
4946 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 4945 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
4947 Label done; 4946 Label done;
4948 __ CmpP(result_reg, Operand::Zero()); 4947 __ CmpP(result_reg, Operand::Zero());
4949 __ bne(&done, Label::kNear); 4948 __ bne(&done, Label::kNear);
4950 __ TestDoubleSign(double_input, scratch1); 4949 __ TestDoubleSign(double_input, scratch1);
4951 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero); 4950 DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
4952 __ bind(&done); 4951 __ bind(&done);
4953 } 4952 }
4954 } 4953 }
4955 } 4954 }
4956 4955
4957 void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) { 4956 void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
4958 Register result_reg = ToRegister(instr->result()); 4957 Register result_reg = ToRegister(instr->result());
4959 Register scratch1 = scratch0(); 4958 Register scratch1 = scratch0();
4960 DoubleRegister double_input = ToDoubleRegister(instr->value()); 4959 DoubleRegister double_input = ToDoubleRegister(instr->value());
4961 DoubleRegister double_scratch = double_scratch0(); 4960 DoubleRegister double_scratch = double_scratch0();
4962 4961
4963 if (instr->truncating()) { 4962 if (instr->truncating()) {
4964 __ TruncateDoubleToI(result_reg, double_input); 4963 __ TruncateDoubleToI(result_reg, double_input);
4965 } else { 4964 } else {
4966 __ TryDoubleToInt32Exact(result_reg, double_input, scratch1, 4965 __ TryDoubleToInt32Exact(result_reg, double_input, scratch1,
4967 double_scratch); 4966 double_scratch);
4968 // Deoptimize if the input wasn't a int32 (inside a double). 4967 // Deoptimize if the input wasn't a int32 (inside a double).
4969 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN); 4968 DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN);
4970 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 4969 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
4971 Label done; 4970 Label done;
4972 __ CmpP(result_reg, Operand::Zero()); 4971 __ CmpP(result_reg, Operand::Zero());
4973 __ bne(&done, Label::kNear); 4972 __ bne(&done, Label::kNear);
4974 __ TestDoubleSign(double_input, scratch1); 4973 __ TestDoubleSign(double_input, scratch1);
4975 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero); 4974 DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
4976 __ bind(&done); 4975 __ bind(&done);
4977 } 4976 }
4978 } 4977 }
4979 #if V8_TARGET_ARCH_S390X 4978 #if V8_TARGET_ARCH_S390X
4980 __ SmiTag(result_reg); 4979 __ SmiTag(result_reg);
4981 #else 4980 #else
4982 __ SmiTagCheckOverflow(result_reg, r0); 4981 __ SmiTagCheckOverflow(result_reg, r0);
4983 DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0); 4982 DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow, cr0);
4984 #endif 4983 #endif
4985 } 4984 }
4986 4985
4987 void LCodeGen::DoCheckSmi(LCheckSmi* instr) { 4986 void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
4988 LOperand* input = instr->value(); 4987 LOperand* input = instr->value();
4989 __ TestIfSmi(ToRegister(input)); 4988 __ TestIfSmi(ToRegister(input));
4990 DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, cr0); 4989 DeoptimizeIf(ne, instr, DeoptimizeReason::kNotASmi, cr0);
4991 } 4990 }
4992 4991
4993 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) { 4992 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
4994 if (!instr->hydrogen()->value()->type().IsHeapObject()) { 4993 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
4995 LOperand* input = instr->value(); 4994 LOperand* input = instr->value();
4996 __ TestIfSmi(ToRegister(input)); 4995 __ TestIfSmi(ToRegister(input));
4997 DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0); 4996 DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, cr0);
4998 } 4997 }
4999 } 4998 }
5000 4999
5001 void LCodeGen::DoCheckArrayBufferNotNeutered( 5000 void LCodeGen::DoCheckArrayBufferNotNeutered(
5002 LCheckArrayBufferNotNeutered* instr) { 5001 LCheckArrayBufferNotNeutered* instr) {
5003 Register view = ToRegister(instr->view()); 5002 Register view = ToRegister(instr->view());
5004 Register scratch = scratch0(); 5003 Register scratch = scratch0();
5005 5004
5006 __ LoadP(scratch, FieldMemOperand(view, JSArrayBufferView::kBufferOffset)); 5005 __ LoadP(scratch, FieldMemOperand(view, JSArrayBufferView::kBufferOffset));
5007 __ LoadlW(scratch, FieldMemOperand(scratch, JSArrayBuffer::kBitFieldOffset)); 5006 __ LoadlW(scratch, FieldMemOperand(scratch, JSArrayBuffer::kBitFieldOffset));
5008 __ And(r0, scratch, Operand(1 << JSArrayBuffer::WasNeutered::kShift)); 5007 __ And(r0, scratch, Operand(1 << JSArrayBuffer::WasNeutered::kShift));
5009 DeoptimizeIf(ne, instr, Deoptimizer::kOutOfBounds, cr0); 5008 DeoptimizeIf(ne, instr, DeoptimizeReason::kOutOfBounds, cr0);
5010 } 5009 }
5011 5010
5012 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) { 5011 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
5013 Register input = ToRegister(instr->value()); 5012 Register input = ToRegister(instr->value());
5014 Register scratch = scratch0(); 5013 Register scratch = scratch0();
5015 5014
5016 __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); 5015 __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
5017 5016
5018 if (instr->hydrogen()->is_interval_check()) { 5017 if (instr->hydrogen()->is_interval_check()) {
5019 InstanceType first; 5018 InstanceType first;
5020 InstanceType last; 5019 InstanceType last;
5021 instr->hydrogen()->GetCheckInterval(&first, &last); 5020 instr->hydrogen()->GetCheckInterval(&first, &last);
5022 5021
5023 __ CmpLogicalByte(FieldMemOperand(scratch, Map::kInstanceTypeOffset), 5022 __ CmpLogicalByte(FieldMemOperand(scratch, Map::kInstanceTypeOffset),
5024 Operand(first)); 5023 Operand(first));
5025 5024
5026 // If there is only one type in the interval check for equality. 5025 // If there is only one type in the interval check for equality.
5027 if (first == last) { 5026 if (first == last) {
5028 DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType); 5027 DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongInstanceType);
5029 } else { 5028 } else {
5030 DeoptimizeIf(lt, instr, Deoptimizer::kWrongInstanceType); 5029 DeoptimizeIf(lt, instr, DeoptimizeReason::kWrongInstanceType);
5031 // Omit check for the last type. 5030 // Omit check for the last type.
5032 if (last != LAST_TYPE) { 5031 if (last != LAST_TYPE) {
5033 __ CmpLogicalByte(FieldMemOperand(scratch, Map::kInstanceTypeOffset), 5032 __ CmpLogicalByte(FieldMemOperand(scratch, Map::kInstanceTypeOffset),
5034 Operand(last)); 5033 Operand(last));
5035 DeoptimizeIf(gt, instr, Deoptimizer::kWrongInstanceType); 5034 DeoptimizeIf(gt, instr, DeoptimizeReason::kWrongInstanceType);
5036 } 5035 }
5037 } 5036 }
5038 } else { 5037 } else {
5039 uint8_t mask; 5038 uint8_t mask;
5040 uint8_t tag; 5039 uint8_t tag;
5041 instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag); 5040 instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
5042 5041
5043 __ LoadlB(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); 5042 __ LoadlB(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
5044 5043
5045 if (base::bits::IsPowerOfTwo32(mask)) { 5044 if (base::bits::IsPowerOfTwo32(mask)) {
5046 DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag)); 5045 DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
5047 __ AndP(scratch, Operand(mask)); 5046 __ AndP(scratch, Operand(mask));
5048 DeoptimizeIf(tag == 0 ? ne : eq, instr, Deoptimizer::kWrongInstanceType); 5047 DeoptimizeIf(tag == 0 ? ne : eq, instr,
5048 DeoptimizeReason::kWrongInstanceType);
5049 } else { 5049 } else {
5050 __ AndP(scratch, Operand(mask)); 5050 __ AndP(scratch, Operand(mask));
5051 __ CmpP(scratch, Operand(tag)); 5051 __ CmpP(scratch, Operand(tag));
5052 DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType); 5052 DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongInstanceType);
5053 } 5053 }
5054 } 5054 }
5055 } 5055 }
5056 5056
5057 void LCodeGen::DoCheckValue(LCheckValue* instr) { 5057 void LCodeGen::DoCheckValue(LCheckValue* instr) {
5058 Register reg = ToRegister(instr->value()); 5058 Register reg = ToRegister(instr->value());
5059 Handle<HeapObject> object = instr->hydrogen()->object().handle(); 5059 Handle<HeapObject> object = instr->hydrogen()->object().handle();
5060 AllowDeferredHandleDereference smi_check; 5060 AllowDeferredHandleDereference smi_check;
5061 if (isolate()->heap()->InNewSpace(*object)) { 5061 if (isolate()->heap()->InNewSpace(*object)) {
5062 Register reg = ToRegister(instr->value()); 5062 Register reg = ToRegister(instr->value());
5063 Handle<Cell> cell = isolate()->factory()->NewCell(object); 5063 Handle<Cell> cell = isolate()->factory()->NewCell(object);
5064 __ mov(ip, Operand(cell)); 5064 __ mov(ip, Operand(cell));
5065 __ CmpP(reg, FieldMemOperand(ip, Cell::kValueOffset)); 5065 __ CmpP(reg, FieldMemOperand(ip, Cell::kValueOffset));
5066 } else { 5066 } else {
5067 __ CmpP(reg, Operand(object)); 5067 __ CmpP(reg, Operand(object));
5068 } 5068 }
5069 DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch); 5069 DeoptimizeIf(ne, instr, DeoptimizeReason::kValueMismatch);
5070 } 5070 }
5071 5071
5072 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) { 5072 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
5073 Register temp = ToRegister(instr->temp()); 5073 Register temp = ToRegister(instr->temp());
5074 { 5074 {
5075 PushSafepointRegistersScope scope(this); 5075 PushSafepointRegistersScope scope(this);
5076 __ push(object); 5076 __ push(object);
5077 __ LoadImmP(cp, Operand::Zero()); 5077 __ LoadImmP(cp, Operand::Zero());
5078 __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance); 5078 __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
5079 RecordSafepointWithRegisters(instr->pointer_map(), 1, 5079 RecordSafepointWithRegisters(instr->pointer_map(), 1,
5080 Safepoint::kNoLazyDeopt); 5080 Safepoint::kNoLazyDeopt);
5081 __ StoreToSafepointRegisterSlot(r2, temp); 5081 __ StoreToSafepointRegisterSlot(r2, temp);
5082 } 5082 }
5083 __ TestIfSmi(temp); 5083 __ TestIfSmi(temp);
5084 DeoptimizeIf(eq, instr, Deoptimizer::kInstanceMigrationFailed, cr0); 5084 DeoptimizeIf(eq, instr, DeoptimizeReason::kInstanceMigrationFailed, cr0);
5085 } 5085 }
5086 5086
5087 void LCodeGen::DoCheckMaps(LCheckMaps* instr) { 5087 void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
5088 class DeferredCheckMaps final : public LDeferredCode { 5088 class DeferredCheckMaps final : public LDeferredCode {
5089 public: 5089 public:
5090 DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object) 5090 DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
5091 : LDeferredCode(codegen), instr_(instr), object_(object) { 5091 : LDeferredCode(codegen), instr_(instr), object_(object) {
5092 SetExit(check_maps()); 5092 SetExit(check_maps());
5093 } 5093 }
5094 void Generate() override { 5094 void Generate() override {
(...skipping 32 matching lines...) Expand 10 before | Expand all | Expand 10 after
5127 Handle<Map> map = maps->at(i).handle(); 5127 Handle<Map> map = maps->at(i).handle();
5128 __ CompareMap(reg, map, &success); 5128 __ CompareMap(reg, map, &success);
5129 __ beq(&success); 5129 __ beq(&success);
5130 } 5130 }
5131 5131
5132 Handle<Map> map = maps->at(maps->size() - 1).handle(); 5132 Handle<Map> map = maps->at(maps->size() - 1).handle();
5133 __ CompareMap(reg, map, &success); 5133 __ CompareMap(reg, map, &success);
5134 if (instr->hydrogen()->HasMigrationTarget()) { 5134 if (instr->hydrogen()->HasMigrationTarget()) {
5135 __ bne(deferred->entry()); 5135 __ bne(deferred->entry());
5136 } else { 5136 } else {
5137 DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap); 5137 DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongMap);
5138 } 5138 }
5139 5139
5140 __ bind(&success); 5140 __ bind(&success);
5141 } 5141 }
5142 5142
5143 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { 5143 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
5144 DoubleRegister value_reg = ToDoubleRegister(instr->unclamped()); 5144 DoubleRegister value_reg = ToDoubleRegister(instr->unclamped());
5145 Register result_reg = ToRegister(instr->result()); 5145 Register result_reg = ToRegister(instr->result());
5146 __ ClampDoubleToUint8(result_reg, value_reg, double_scratch0()); 5146 __ ClampDoubleToUint8(result_reg, value_reg, double_scratch0());
5147 } 5147 }
(...skipping 15 matching lines...) Expand all
5163 __ UntagAndJumpIfSmi(result_reg, input_reg, &is_smi); 5163 __ UntagAndJumpIfSmi(result_reg, input_reg, &is_smi);
5164 5164
5165 // Check for heap number 5165 // Check for heap number
5166 __ LoadP(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); 5166 __ LoadP(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
5167 __ CmpP(scratch, Operand(factory()->heap_number_map())); 5167 __ CmpP(scratch, Operand(factory()->heap_number_map()));
5168 __ beq(&heap_number, Label::kNear); 5168 __ beq(&heap_number, Label::kNear);
5169 5169
5170 // Check for undefined. Undefined is converted to zero for clamping 5170 // Check for undefined. Undefined is converted to zero for clamping
5171 // conversions. 5171 // conversions.
5172 __ CmpP(input_reg, Operand(factory()->undefined_value())); 5172 __ CmpP(input_reg, Operand(factory()->undefined_value()));
5173 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined); 5173 DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumberUndefined);
5174 __ LoadImmP(result_reg, Operand::Zero()); 5174 __ LoadImmP(result_reg, Operand::Zero());
5175 __ b(&done, Label::kNear); 5175 __ b(&done, Label::kNear);
5176 5176
5177 // Heap number 5177 // Heap number
5178 __ bind(&heap_number); 5178 __ bind(&heap_number);
5179 __ ld(temp_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset)); 5179 __ ld(temp_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
5180 __ ClampDoubleToUint8(result_reg, temp_reg, double_scratch0()); 5180 __ ClampDoubleToUint8(result_reg, temp_reg, double_scratch0());
5181 __ b(&done, Label::kNear); 5181 __ b(&done, Label::kNear);
5182 5182
5183 // smi 5183 // smi
(...skipping 400 matching lines...) Expand 10 before | Expand all | Expand 10 after
5584 __ CmpSmiLiteral(result, Smi::FromInt(0), r0); 5584 __ CmpSmiLiteral(result, Smi::FromInt(0), r0);
5585 __ bne(&load_cache, Label::kNear); 5585 __ bne(&load_cache, Label::kNear);
5586 __ mov(result, Operand(isolate()->factory()->empty_fixed_array())); 5586 __ mov(result, Operand(isolate()->factory()->empty_fixed_array()));
5587 __ b(&done, Label::kNear); 5587 __ b(&done, Label::kNear);
5588 5588
5589 __ bind(&load_cache); 5589 __ bind(&load_cache);
5590 __ LoadInstanceDescriptors(map, result); 5590 __ LoadInstanceDescriptors(map, result);
5591 __ LoadP(result, FieldMemOperand(result, DescriptorArray::kEnumCacheOffset)); 5591 __ LoadP(result, FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
5592 __ LoadP(result, FieldMemOperand(result, FixedArray::SizeFor(instr->idx()))); 5592 __ LoadP(result, FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
5593 __ CmpP(result, Operand::Zero()); 5593 __ CmpP(result, Operand::Zero());
5594 DeoptimizeIf(eq, instr, Deoptimizer::kNoCache); 5594 DeoptimizeIf(eq, instr, DeoptimizeReason::kNoCache);
5595 5595
5596 __ bind(&done); 5596 __ bind(&done);
5597 } 5597 }
5598 5598
5599 void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) { 5599 void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
5600 Register object = ToRegister(instr->value()); 5600 Register object = ToRegister(instr->value());
5601 Register map = ToRegister(instr->map()); 5601 Register map = ToRegister(instr->map());
5602 __ LoadP(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset)); 5602 __ LoadP(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
5603 __ CmpP(map, scratch0()); 5603 __ CmpP(map, scratch0());
5604 DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap); 5604 DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongMap);
5605 } 5605 }
5606 5606
5607 void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr, 5607 void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
5608 Register result, Register object, 5608 Register result, Register object,
5609 Register index) { 5609 Register index) {
5610 PushSafepointRegistersScope scope(this); 5610 PushSafepointRegistersScope scope(this);
5611 __ Push(object, index); 5611 __ Push(object, index);
5612 __ LoadImmP(cp, Operand::Zero()); 5612 __ LoadImmP(cp, Operand::Zero());
5613 __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble); 5613 __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
5614 RecordSafepointWithRegisters(instr->pointer_map(), 2, 5614 RecordSafepointWithRegisters(instr->pointer_map(), 2,
(...skipping 55 matching lines...) Expand 10 before | Expand all | Expand 10 after
5670 __ LoadP(result, 5670 __ LoadP(result,
5671 FieldMemOperand(scratch, FixedArray::kHeaderSize - kPointerSize)); 5671 FieldMemOperand(scratch, FixedArray::kHeaderSize - kPointerSize));
5672 __ bind(deferred->exit()); 5672 __ bind(deferred->exit());
5673 __ bind(&done); 5673 __ bind(&done);
5674 } 5674 }
5675 5675
5676 #undef __ 5676 #undef __
5677 5677
5678 } // namespace internal 5678 } // namespace internal
5679 } // namespace v8 5679 } // namespace v8
OLDNEW
« no previous file with comments | « src/crankshaft/s390/lithium-codegen-s390.h ('k') | src/crankshaft/x64/lithium-codegen-x64.h » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698