| OLD | NEW |
| 1 // Copyright 2014 the V8 project authors. All rights reserved. | 1 // Copyright 2014 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/crankshaft/ppc/lithium-codegen-ppc.h" | 5 #include "src/crankshaft/ppc/lithium-codegen-ppc.h" |
| 6 | 6 |
| 7 #include "src/base/bits.h" | 7 #include "src/base/bits.h" |
| 8 #include "src/code-factory.h" | 8 #include "src/code-factory.h" |
| 9 #include "src/code-stubs.h" | 9 #include "src/code-stubs.h" |
| 10 #include "src/crankshaft/hydrogen-osr.h" | 10 #include "src/crankshaft/hydrogen-osr.h" |
| (...skipping 695 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 706 Translation translation(&translations_, frame_count, jsframe_count, zone()); | 706 Translation translation(&translations_, frame_count, jsframe_count, zone()); |
| 707 WriteTranslation(environment, &translation); | 707 WriteTranslation(environment, &translation); |
| 708 int deoptimization_index = deoptimizations_.length(); | 708 int deoptimization_index = deoptimizations_.length(); |
| 709 int pc_offset = masm()->pc_offset(); | 709 int pc_offset = masm()->pc_offset(); |
| 710 environment->Register(deoptimization_index, translation.index(), | 710 environment->Register(deoptimization_index, translation.index(), |
| 711 (mode == Safepoint::kLazyDeopt) ? pc_offset : -1); | 711 (mode == Safepoint::kLazyDeopt) ? pc_offset : -1); |
| 712 deoptimizations_.Add(environment, zone()); | 712 deoptimizations_.Add(environment, zone()); |
| 713 } | 713 } |
| 714 } | 714 } |
| 715 | 715 |
| 716 | |
| 717 void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr, | 716 void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr, |
| 718 Deoptimizer::DeoptReason deopt_reason, | 717 DeoptimizeReason deopt_reason, |
| 719 Deoptimizer::BailoutType bailout_type, | 718 Deoptimizer::BailoutType bailout_type, |
| 720 CRegister cr) { | 719 CRegister cr) { |
| 721 LEnvironment* environment = instr->environment(); | 720 LEnvironment* environment = instr->environment(); |
| 722 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); | 721 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); |
| 723 DCHECK(environment->HasBeenRegistered()); | 722 DCHECK(environment->HasBeenRegistered()); |
| 724 int id = environment->deoptimization_index(); | 723 int id = environment->deoptimization_index(); |
| 725 Address entry = | 724 Address entry = |
| 726 Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type); | 725 Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type); |
| 727 if (entry == NULL) { | 726 if (entry == NULL) { |
| 728 Abort(kBailoutWasNotPrepared); | 727 Abort(kBailoutWasNotPrepared); |
| (...skipping 41 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 770 // jump entry if this is the case. | 769 // jump entry if this is the case. |
| 771 if (FLAG_trace_deopt || isolate()->is_profiling() || | 770 if (FLAG_trace_deopt || isolate()->is_profiling() || |
| 772 jump_table_.is_empty() || | 771 jump_table_.is_empty() || |
| 773 !table_entry.IsEquivalentTo(jump_table_.last())) { | 772 !table_entry.IsEquivalentTo(jump_table_.last())) { |
| 774 jump_table_.Add(table_entry, zone()); | 773 jump_table_.Add(table_entry, zone()); |
| 775 } | 774 } |
| 776 __ b(cond, &jump_table_.last().label, cr); | 775 __ b(cond, &jump_table_.last().label, cr); |
| 777 } | 776 } |
| 778 } | 777 } |
| 779 | 778 |
| 780 | |
| 781 void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr, | 779 void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr, |
| 782 Deoptimizer::DeoptReason deopt_reason, | 780 DeoptimizeReason deopt_reason, CRegister cr) { |
| 783 CRegister cr) { | |
| 784 Deoptimizer::BailoutType bailout_type = | 781 Deoptimizer::BailoutType bailout_type = |
| 785 info()->IsStub() ? Deoptimizer::LAZY : Deoptimizer::EAGER; | 782 info()->IsStub() ? Deoptimizer::LAZY : Deoptimizer::EAGER; |
| 786 DeoptimizeIf(condition, instr, deopt_reason, bailout_type, cr); | 783 DeoptimizeIf(condition, instr, deopt_reason, bailout_type, cr); |
| 787 } | 784 } |
| 788 | 785 |
| 789 | 786 |
| 790 void LCodeGen::RecordSafepointWithLazyDeopt(LInstruction* instr, | 787 void LCodeGen::RecordSafepointWithLazyDeopt(LInstruction* instr, |
| 791 SafepointMode safepoint_mode) { | 788 SafepointMode safepoint_mode) { |
| 792 if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) { | 789 if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) { |
| 793 RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt); | 790 RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt); |
| (...skipping 101 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 895 Label dividend_is_not_negative, done; | 892 Label dividend_is_not_negative, done; |
| 896 if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) { | 893 if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) { |
| 897 __ cmpwi(dividend, Operand::Zero()); | 894 __ cmpwi(dividend, Operand::Zero()); |
| 898 __ bge(&dividend_is_not_negative); | 895 __ bge(&dividend_is_not_negative); |
| 899 if (shift) { | 896 if (shift) { |
| 900 // Note that this is correct even for kMinInt operands. | 897 // Note that this is correct even for kMinInt operands. |
| 901 __ neg(dividend, dividend); | 898 __ neg(dividend, dividend); |
| 902 __ ExtractBitRange(dividend, dividend, shift - 1, 0); | 899 __ ExtractBitRange(dividend, dividend, shift - 1, 0); |
| 903 __ neg(dividend, dividend, LeaveOE, SetRC); | 900 __ neg(dividend, dividend, LeaveOE, SetRC); |
| 904 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { | 901 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 905 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, cr0); | 902 DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, cr0); |
| 906 } | 903 } |
| 907 } else if (!hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { | 904 } else if (!hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 908 __ li(dividend, Operand::Zero()); | 905 __ li(dividend, Operand::Zero()); |
| 909 } else { | 906 } else { |
| 910 DeoptimizeIf(al, instr, Deoptimizer::kMinusZero); | 907 DeoptimizeIf(al, instr, DeoptimizeReason::kMinusZero); |
| 911 } | 908 } |
| 912 __ b(&done); | 909 __ b(&done); |
| 913 } | 910 } |
| 914 | 911 |
| 915 __ bind(&dividend_is_not_negative); | 912 __ bind(&dividend_is_not_negative); |
| 916 if (shift) { | 913 if (shift) { |
| 917 __ ExtractBitRange(dividend, dividend, shift - 1, 0); | 914 __ ExtractBitRange(dividend, dividend, shift - 1, 0); |
| 918 } else { | 915 } else { |
| 919 __ li(dividend, Operand::Zero()); | 916 __ li(dividend, Operand::Zero()); |
| 920 } | 917 } |
| 921 __ bind(&done); | 918 __ bind(&done); |
| 922 } | 919 } |
| 923 | 920 |
| 924 | 921 |
| 925 void LCodeGen::DoModByConstI(LModByConstI* instr) { | 922 void LCodeGen::DoModByConstI(LModByConstI* instr) { |
| 926 Register dividend = ToRegister(instr->dividend()); | 923 Register dividend = ToRegister(instr->dividend()); |
| 927 int32_t divisor = instr->divisor(); | 924 int32_t divisor = instr->divisor(); |
| 928 Register result = ToRegister(instr->result()); | 925 Register result = ToRegister(instr->result()); |
| 929 DCHECK(!dividend.is(result)); | 926 DCHECK(!dividend.is(result)); |
| 930 | 927 |
| 931 if (divisor == 0) { | 928 if (divisor == 0) { |
| 932 DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero); | 929 DeoptimizeIf(al, instr, DeoptimizeReason::kDivisionByZero); |
| 933 return; | 930 return; |
| 934 } | 931 } |
| 935 | 932 |
| 936 __ TruncatingDiv(result, dividend, Abs(divisor)); | 933 __ TruncatingDiv(result, dividend, Abs(divisor)); |
| 937 __ mov(ip, Operand(Abs(divisor))); | 934 __ mov(ip, Operand(Abs(divisor))); |
| 938 __ mullw(result, result, ip); | 935 __ mullw(result, result, ip); |
| 939 __ sub(result, dividend, result, LeaveOE, SetRC); | 936 __ sub(result, dividend, result, LeaveOE, SetRC); |
| 940 | 937 |
| 941 // Check for negative zero. | 938 // Check for negative zero. |
| 942 HMod* hmod = instr->hydrogen(); | 939 HMod* hmod = instr->hydrogen(); |
| 943 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { | 940 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 944 Label remainder_not_zero; | 941 Label remainder_not_zero; |
| 945 __ bne(&remainder_not_zero, cr0); | 942 __ bne(&remainder_not_zero, cr0); |
| 946 __ cmpwi(dividend, Operand::Zero()); | 943 __ cmpwi(dividend, Operand::Zero()); |
| 947 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero); | 944 DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero); |
| 948 __ bind(&remainder_not_zero); | 945 __ bind(&remainder_not_zero); |
| 949 } | 946 } |
| 950 } | 947 } |
| 951 | 948 |
| 952 | 949 |
| 953 void LCodeGen::DoModI(LModI* instr) { | 950 void LCodeGen::DoModI(LModI* instr) { |
| 954 HMod* hmod = instr->hydrogen(); | 951 HMod* hmod = instr->hydrogen(); |
| 955 Register left_reg = ToRegister(instr->left()); | 952 Register left_reg = ToRegister(instr->left()); |
| 956 Register right_reg = ToRegister(instr->right()); | 953 Register right_reg = ToRegister(instr->right()); |
| 957 Register result_reg = ToRegister(instr->result()); | 954 Register result_reg = ToRegister(instr->result()); |
| 958 Register scratch = scratch0(); | 955 Register scratch = scratch0(); |
| 959 bool can_overflow = hmod->CheckFlag(HValue::kCanOverflow); | 956 bool can_overflow = hmod->CheckFlag(HValue::kCanOverflow); |
| 960 Label done; | 957 Label done; |
| 961 | 958 |
| 962 if (can_overflow) { | 959 if (can_overflow) { |
| 963 __ li(r0, Operand::Zero()); // clear xer | 960 __ li(r0, Operand::Zero()); // clear xer |
| 964 __ mtxer(r0); | 961 __ mtxer(r0); |
| 965 } | 962 } |
| 966 | 963 |
| 967 __ divw(scratch, left_reg, right_reg, SetOE, SetRC); | 964 __ divw(scratch, left_reg, right_reg, SetOE, SetRC); |
| 968 | 965 |
| 969 // Check for x % 0. | 966 // Check for x % 0. |
| 970 if (hmod->CheckFlag(HValue::kCanBeDivByZero)) { | 967 if (hmod->CheckFlag(HValue::kCanBeDivByZero)) { |
| 971 __ cmpwi(right_reg, Operand::Zero()); | 968 __ cmpwi(right_reg, Operand::Zero()); |
| 972 DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero); | 969 DeoptimizeIf(eq, instr, DeoptimizeReason::kDivisionByZero); |
| 973 } | 970 } |
| 974 | 971 |
| 975 // Check for kMinInt % -1, divw will return undefined, which is not what we | 972 // Check for kMinInt % -1, divw will return undefined, which is not what we |
| 976 // want. We have to deopt if we care about -0, because we can't return that. | 973 // want. We have to deopt if we care about -0, because we can't return that. |
| 977 if (can_overflow) { | 974 if (can_overflow) { |
| 978 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { | 975 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 979 DeoptimizeIf(overflow, instr, Deoptimizer::kMinusZero, cr0); | 976 DeoptimizeIf(overflow, instr, DeoptimizeReason::kMinusZero, cr0); |
| 980 } else { | 977 } else { |
| 981 if (CpuFeatures::IsSupported(ISELECT)) { | 978 if (CpuFeatures::IsSupported(ISELECT)) { |
| 982 __ isel(overflow, result_reg, r0, result_reg, cr0); | 979 __ isel(overflow, result_reg, r0, result_reg, cr0); |
| 983 __ boverflow(&done, cr0); | 980 __ boverflow(&done, cr0); |
| 984 } else { | 981 } else { |
| 985 Label no_overflow_possible; | 982 Label no_overflow_possible; |
| 986 __ bnooverflow(&no_overflow_possible, cr0); | 983 __ bnooverflow(&no_overflow_possible, cr0); |
| 987 __ li(result_reg, Operand::Zero()); | 984 __ li(result_reg, Operand::Zero()); |
| 988 __ b(&done); | 985 __ b(&done); |
| 989 __ bind(&no_overflow_possible); | 986 __ bind(&no_overflow_possible); |
| 990 } | 987 } |
| 991 } | 988 } |
| 992 } | 989 } |
| 993 | 990 |
| 994 __ mullw(scratch, right_reg, scratch); | 991 __ mullw(scratch, right_reg, scratch); |
| 995 __ sub(result_reg, left_reg, scratch, LeaveOE, SetRC); | 992 __ sub(result_reg, left_reg, scratch, LeaveOE, SetRC); |
| 996 | 993 |
| 997 // If we care about -0, test if the dividend is <0 and the result is 0. | 994 // If we care about -0, test if the dividend is <0 and the result is 0. |
| 998 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { | 995 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 999 __ bne(&done, cr0); | 996 __ bne(&done, cr0); |
| 1000 __ cmpwi(left_reg, Operand::Zero()); | 997 __ cmpwi(left_reg, Operand::Zero()); |
| 1001 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero); | 998 DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero); |
| 1002 } | 999 } |
| 1003 | 1000 |
| 1004 __ bind(&done); | 1001 __ bind(&done); |
| 1005 } | 1002 } |
| 1006 | 1003 |
| 1007 | 1004 |
| 1008 void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) { | 1005 void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) { |
| 1009 Register dividend = ToRegister(instr->dividend()); | 1006 Register dividend = ToRegister(instr->dividend()); |
| 1010 int32_t divisor = instr->divisor(); | 1007 int32_t divisor = instr->divisor(); |
| 1011 Register result = ToRegister(instr->result()); | 1008 Register result = ToRegister(instr->result()); |
| 1012 DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor))); | 1009 DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor))); |
| 1013 DCHECK(!result.is(dividend)); | 1010 DCHECK(!result.is(dividend)); |
| 1014 | 1011 |
| 1015 // Check for (0 / -x) that will produce negative zero. | 1012 // Check for (0 / -x) that will produce negative zero. |
| 1016 HDiv* hdiv = instr->hydrogen(); | 1013 HDiv* hdiv = instr->hydrogen(); |
| 1017 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { | 1014 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { |
| 1018 __ cmpwi(dividend, Operand::Zero()); | 1015 __ cmpwi(dividend, Operand::Zero()); |
| 1019 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero); | 1016 DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero); |
| 1020 } | 1017 } |
| 1021 // Check for (kMinInt / -1). | 1018 // Check for (kMinInt / -1). |
| 1022 if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) { | 1019 if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) { |
| 1023 __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000))); | 1020 __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000))); |
| 1024 __ cmpw(dividend, r0); | 1021 __ cmpw(dividend, r0); |
| 1025 DeoptimizeIf(eq, instr, Deoptimizer::kOverflow); | 1022 DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow); |
| 1026 } | 1023 } |
| 1027 | 1024 |
| 1028 int32_t shift = WhichPowerOf2Abs(divisor); | 1025 int32_t shift = WhichPowerOf2Abs(divisor); |
| 1029 | 1026 |
| 1030 // Deoptimize if remainder will not be 0. | 1027 // Deoptimize if remainder will not be 0. |
| 1031 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && shift) { | 1028 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && shift) { |
| 1032 __ TestBitRange(dividend, shift - 1, 0, r0); | 1029 __ TestBitRange(dividend, shift - 1, 0, r0); |
| 1033 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision, cr0); | 1030 DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision, cr0); |
| 1034 } | 1031 } |
| 1035 | 1032 |
| 1036 if (divisor == -1) { // Nice shortcut, not needed for correctness. | 1033 if (divisor == -1) { // Nice shortcut, not needed for correctness. |
| 1037 __ neg(result, dividend); | 1034 __ neg(result, dividend); |
| 1038 return; | 1035 return; |
| 1039 } | 1036 } |
| 1040 if (shift == 0) { | 1037 if (shift == 0) { |
| 1041 __ mr(result, dividend); | 1038 __ mr(result, dividend); |
| 1042 } else { | 1039 } else { |
| 1043 if (shift == 1) { | 1040 if (shift == 1) { |
| 1044 __ srwi(result, dividend, Operand(31)); | 1041 __ srwi(result, dividend, Operand(31)); |
| 1045 } else { | 1042 } else { |
| 1046 __ srawi(result, dividend, 31); | 1043 __ srawi(result, dividend, 31); |
| 1047 __ srwi(result, result, Operand(32 - shift)); | 1044 __ srwi(result, result, Operand(32 - shift)); |
| 1048 } | 1045 } |
| 1049 __ add(result, dividend, result); | 1046 __ add(result, dividend, result); |
| 1050 __ srawi(result, result, shift); | 1047 __ srawi(result, result, shift); |
| 1051 } | 1048 } |
| 1052 if (divisor < 0) __ neg(result, result); | 1049 if (divisor < 0) __ neg(result, result); |
| 1053 } | 1050 } |
| 1054 | 1051 |
| 1055 | 1052 |
| 1056 void LCodeGen::DoDivByConstI(LDivByConstI* instr) { | 1053 void LCodeGen::DoDivByConstI(LDivByConstI* instr) { |
| 1057 Register dividend = ToRegister(instr->dividend()); | 1054 Register dividend = ToRegister(instr->dividend()); |
| 1058 int32_t divisor = instr->divisor(); | 1055 int32_t divisor = instr->divisor(); |
| 1059 Register result = ToRegister(instr->result()); | 1056 Register result = ToRegister(instr->result()); |
| 1060 DCHECK(!dividend.is(result)); | 1057 DCHECK(!dividend.is(result)); |
| 1061 | 1058 |
| 1062 if (divisor == 0) { | 1059 if (divisor == 0) { |
| 1063 DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero); | 1060 DeoptimizeIf(al, instr, DeoptimizeReason::kDivisionByZero); |
| 1064 return; | 1061 return; |
| 1065 } | 1062 } |
| 1066 | 1063 |
| 1067 // Check for (0 / -x) that will produce negative zero. | 1064 // Check for (0 / -x) that will produce negative zero. |
| 1068 HDiv* hdiv = instr->hydrogen(); | 1065 HDiv* hdiv = instr->hydrogen(); |
| 1069 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { | 1066 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { |
| 1070 __ cmpwi(dividend, Operand::Zero()); | 1067 __ cmpwi(dividend, Operand::Zero()); |
| 1071 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero); | 1068 DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero); |
| 1072 } | 1069 } |
| 1073 | 1070 |
| 1074 __ TruncatingDiv(result, dividend, Abs(divisor)); | 1071 __ TruncatingDiv(result, dividend, Abs(divisor)); |
| 1075 if (divisor < 0) __ neg(result, result); | 1072 if (divisor < 0) __ neg(result, result); |
| 1076 | 1073 |
| 1077 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) { | 1074 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) { |
| 1078 Register scratch = scratch0(); | 1075 Register scratch = scratch0(); |
| 1079 __ mov(ip, Operand(divisor)); | 1076 __ mov(ip, Operand(divisor)); |
| 1080 __ mullw(scratch, result, ip); | 1077 __ mullw(scratch, result, ip); |
| 1081 __ cmpw(scratch, dividend); | 1078 __ cmpw(scratch, dividend); |
| 1082 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision); | 1079 DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision); |
| 1083 } | 1080 } |
| 1084 } | 1081 } |
| 1085 | 1082 |
| 1086 | 1083 |
| 1087 // TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI. | 1084 // TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI. |
| 1088 void LCodeGen::DoDivI(LDivI* instr) { | 1085 void LCodeGen::DoDivI(LDivI* instr) { |
| 1089 HBinaryOperation* hdiv = instr->hydrogen(); | 1086 HBinaryOperation* hdiv = instr->hydrogen(); |
| 1090 const Register dividend = ToRegister(instr->dividend()); | 1087 const Register dividend = ToRegister(instr->dividend()); |
| 1091 const Register divisor = ToRegister(instr->divisor()); | 1088 const Register divisor = ToRegister(instr->divisor()); |
| 1092 Register result = ToRegister(instr->result()); | 1089 Register result = ToRegister(instr->result()); |
| 1093 bool can_overflow = hdiv->CheckFlag(HValue::kCanOverflow); | 1090 bool can_overflow = hdiv->CheckFlag(HValue::kCanOverflow); |
| 1094 | 1091 |
| 1095 DCHECK(!dividend.is(result)); | 1092 DCHECK(!dividend.is(result)); |
| 1096 DCHECK(!divisor.is(result)); | 1093 DCHECK(!divisor.is(result)); |
| 1097 | 1094 |
| 1098 if (can_overflow) { | 1095 if (can_overflow) { |
| 1099 __ li(r0, Operand::Zero()); // clear xer | 1096 __ li(r0, Operand::Zero()); // clear xer |
| 1100 __ mtxer(r0); | 1097 __ mtxer(r0); |
| 1101 } | 1098 } |
| 1102 | 1099 |
| 1103 __ divw(result, dividend, divisor, SetOE, SetRC); | 1100 __ divw(result, dividend, divisor, SetOE, SetRC); |
| 1104 | 1101 |
| 1105 // Check for x / 0. | 1102 // Check for x / 0. |
| 1106 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { | 1103 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { |
| 1107 __ cmpwi(divisor, Operand::Zero()); | 1104 __ cmpwi(divisor, Operand::Zero()); |
| 1108 DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero); | 1105 DeoptimizeIf(eq, instr, DeoptimizeReason::kDivisionByZero); |
| 1109 } | 1106 } |
| 1110 | 1107 |
| 1111 // Check for (0 / -x) that will produce negative zero. | 1108 // Check for (0 / -x) that will produce negative zero. |
| 1112 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1109 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 1113 Label dividend_not_zero; | 1110 Label dividend_not_zero; |
| 1114 __ cmpwi(dividend, Operand::Zero()); | 1111 __ cmpwi(dividend, Operand::Zero()); |
| 1115 __ bne(&dividend_not_zero); | 1112 __ bne(&dividend_not_zero); |
| 1116 __ cmpwi(divisor, Operand::Zero()); | 1113 __ cmpwi(divisor, Operand::Zero()); |
| 1117 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero); | 1114 DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero); |
| 1118 __ bind(&dividend_not_zero); | 1115 __ bind(&dividend_not_zero); |
| 1119 } | 1116 } |
| 1120 | 1117 |
| 1121 // Check for (kMinInt / -1). | 1118 // Check for (kMinInt / -1). |
| 1122 if (can_overflow) { | 1119 if (can_overflow) { |
| 1123 if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { | 1120 if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { |
| 1124 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow, cr0); | 1121 DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow, cr0); |
| 1125 } else { | 1122 } else { |
| 1126 // When truncating, we want kMinInt / -1 = kMinInt. | 1123 // When truncating, we want kMinInt / -1 = kMinInt. |
| 1127 if (CpuFeatures::IsSupported(ISELECT)) { | 1124 if (CpuFeatures::IsSupported(ISELECT)) { |
| 1128 __ isel(overflow, result, dividend, result, cr0); | 1125 __ isel(overflow, result, dividend, result, cr0); |
| 1129 } else { | 1126 } else { |
| 1130 Label no_overflow_possible; | 1127 Label no_overflow_possible; |
| 1131 __ bnooverflow(&no_overflow_possible, cr0); | 1128 __ bnooverflow(&no_overflow_possible, cr0); |
| 1132 __ mr(result, dividend); | 1129 __ mr(result, dividend); |
| 1133 __ bind(&no_overflow_possible); | 1130 __ bind(&no_overflow_possible); |
| 1134 } | 1131 } |
| 1135 } | 1132 } |
| 1136 } | 1133 } |
| 1137 | 1134 |
| 1138 #if V8_TARGET_ARCH_PPC64 | 1135 #if V8_TARGET_ARCH_PPC64 |
| 1139 __ extsw(result, result); | 1136 __ extsw(result, result); |
| 1140 #endif | 1137 #endif |
| 1141 | 1138 |
| 1142 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) { | 1139 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) { |
| 1143 // Deoptimize if remainder is not 0. | 1140 // Deoptimize if remainder is not 0. |
| 1144 Register scratch = scratch0(); | 1141 Register scratch = scratch0(); |
| 1145 __ mullw(scratch, divisor, result); | 1142 __ mullw(scratch, divisor, result); |
| 1146 __ cmpw(dividend, scratch); | 1143 __ cmpw(dividend, scratch); |
| 1147 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision); | 1144 DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision); |
| 1148 } | 1145 } |
| 1149 } | 1146 } |
| 1150 | 1147 |
| 1151 | 1148 |
| 1152 void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) { | 1149 void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) { |
| 1153 HBinaryOperation* hdiv = instr->hydrogen(); | 1150 HBinaryOperation* hdiv = instr->hydrogen(); |
| 1154 Register dividend = ToRegister(instr->dividend()); | 1151 Register dividend = ToRegister(instr->dividend()); |
| 1155 Register result = ToRegister(instr->result()); | 1152 Register result = ToRegister(instr->result()); |
| 1156 int32_t divisor = instr->divisor(); | 1153 int32_t divisor = instr->divisor(); |
| 1157 bool can_overflow = hdiv->CheckFlag(HValue::kLeftCanBeMinInt); | 1154 bool can_overflow = hdiv->CheckFlag(HValue::kLeftCanBeMinInt); |
| 1158 | 1155 |
| 1159 // If the divisor is positive, things are easy: There can be no deopts and we | 1156 // If the divisor is positive, things are easy: There can be no deopts and we |
| 1160 // can simply do an arithmetic right shift. | 1157 // can simply do an arithmetic right shift. |
| 1161 int32_t shift = WhichPowerOf2Abs(divisor); | 1158 int32_t shift = WhichPowerOf2Abs(divisor); |
| 1162 if (divisor > 0) { | 1159 if (divisor > 0) { |
| 1163 if (shift || !result.is(dividend)) { | 1160 if (shift || !result.is(dividend)) { |
| 1164 __ srawi(result, dividend, shift); | 1161 __ srawi(result, dividend, shift); |
| 1165 } | 1162 } |
| 1166 return; | 1163 return; |
| 1167 } | 1164 } |
| 1168 | 1165 |
| 1169 // If the divisor is negative, we have to negate and handle edge cases. | 1166 // If the divisor is negative, we have to negate and handle edge cases. |
| 1170 OEBit oe = LeaveOE; | 1167 OEBit oe = LeaveOE; |
| 1171 #if V8_TARGET_ARCH_PPC64 | 1168 #if V8_TARGET_ARCH_PPC64 |
| 1172 if (divisor == -1 && can_overflow) { | 1169 if (divisor == -1 && can_overflow) { |
| 1173 __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000))); | 1170 __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000))); |
| 1174 __ cmpw(dividend, r0); | 1171 __ cmpw(dividend, r0); |
| 1175 DeoptimizeIf(eq, instr, Deoptimizer::kOverflow); | 1172 DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow); |
| 1176 } | 1173 } |
| 1177 #else | 1174 #else |
| 1178 if (can_overflow) { | 1175 if (can_overflow) { |
| 1179 __ li(r0, Operand::Zero()); // clear xer | 1176 __ li(r0, Operand::Zero()); // clear xer |
| 1180 __ mtxer(r0); | 1177 __ mtxer(r0); |
| 1181 oe = SetOE; | 1178 oe = SetOE; |
| 1182 } | 1179 } |
| 1183 #endif | 1180 #endif |
| 1184 | 1181 |
| 1185 __ neg(result, dividend, oe, SetRC); | 1182 __ neg(result, dividend, oe, SetRC); |
| 1186 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1183 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 1187 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, cr0); | 1184 DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, cr0); |
| 1188 } | 1185 } |
| 1189 | 1186 |
| 1190 // If the negation could not overflow, simply shifting is OK. | 1187 // If the negation could not overflow, simply shifting is OK. |
| 1191 #if !V8_TARGET_ARCH_PPC64 | 1188 #if !V8_TARGET_ARCH_PPC64 |
| 1192 if (!can_overflow) { | 1189 if (!can_overflow) { |
| 1193 #endif | 1190 #endif |
| 1194 if (shift) { | 1191 if (shift) { |
| 1195 __ ShiftRightArithImm(result, result, shift); | 1192 __ ShiftRightArithImm(result, result, shift); |
| 1196 } | 1193 } |
| 1197 return; | 1194 return; |
| 1198 #if !V8_TARGET_ARCH_PPC64 | 1195 #if !V8_TARGET_ARCH_PPC64 |
| 1199 } | 1196 } |
| 1200 | 1197 |
| 1201 // Dividing by -1 is basically negation, unless we overflow. | 1198 // Dividing by -1 is basically negation, unless we overflow. |
| 1202 if (divisor == -1) { | 1199 if (divisor == -1) { |
| 1203 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow, cr0); | 1200 DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow, cr0); |
| 1204 return; | 1201 return; |
| 1205 } | 1202 } |
| 1206 | 1203 |
| 1207 Label overflow, done; | 1204 Label overflow, done; |
| 1208 __ boverflow(&overflow, cr0); | 1205 __ boverflow(&overflow, cr0); |
| 1209 __ srawi(result, result, shift); | 1206 __ srawi(result, result, shift); |
| 1210 __ b(&done); | 1207 __ b(&done); |
| 1211 __ bind(&overflow); | 1208 __ bind(&overflow); |
| 1212 __ mov(result, Operand(kMinInt / divisor)); | 1209 __ mov(result, Operand(kMinInt / divisor)); |
| 1213 __ bind(&done); | 1210 __ bind(&done); |
| 1214 #endif | 1211 #endif |
| 1215 } | 1212 } |
| 1216 | 1213 |
| 1217 | 1214 |
| 1218 void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) { | 1215 void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) { |
| 1219 Register dividend = ToRegister(instr->dividend()); | 1216 Register dividend = ToRegister(instr->dividend()); |
| 1220 int32_t divisor = instr->divisor(); | 1217 int32_t divisor = instr->divisor(); |
| 1221 Register result = ToRegister(instr->result()); | 1218 Register result = ToRegister(instr->result()); |
| 1222 DCHECK(!dividend.is(result)); | 1219 DCHECK(!dividend.is(result)); |
| 1223 | 1220 |
| 1224 if (divisor == 0) { | 1221 if (divisor == 0) { |
| 1225 DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero); | 1222 DeoptimizeIf(al, instr, DeoptimizeReason::kDivisionByZero); |
| 1226 return; | 1223 return; |
| 1227 } | 1224 } |
| 1228 | 1225 |
| 1229 // Check for (0 / -x) that will produce negative zero. | 1226 // Check for (0 / -x) that will produce negative zero. |
| 1230 HMathFloorOfDiv* hdiv = instr->hydrogen(); | 1227 HMathFloorOfDiv* hdiv = instr->hydrogen(); |
| 1231 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { | 1228 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { |
| 1232 __ cmpwi(dividend, Operand::Zero()); | 1229 __ cmpwi(dividend, Operand::Zero()); |
| 1233 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero); | 1230 DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero); |
| 1234 } | 1231 } |
| 1235 | 1232 |
| 1236 // Easy case: We need no dynamic check for the dividend and the flooring | 1233 // Easy case: We need no dynamic check for the dividend and the flooring |
| 1237 // division is the same as the truncating division. | 1234 // division is the same as the truncating division. |
| 1238 if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) || | 1235 if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) || |
| 1239 (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) { | 1236 (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) { |
| 1240 __ TruncatingDiv(result, dividend, Abs(divisor)); | 1237 __ TruncatingDiv(result, dividend, Abs(divisor)); |
| 1241 if (divisor < 0) __ neg(result, result); | 1238 if (divisor < 0) __ neg(result, result); |
| 1242 return; | 1239 return; |
| 1243 } | 1240 } |
| (...skipping 31 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1275 if (can_overflow) { | 1272 if (can_overflow) { |
| 1276 __ li(r0, Operand::Zero()); // clear xer | 1273 __ li(r0, Operand::Zero()); // clear xer |
| 1277 __ mtxer(r0); | 1274 __ mtxer(r0); |
| 1278 } | 1275 } |
| 1279 | 1276 |
| 1280 __ divw(result, dividend, divisor, SetOE, SetRC); | 1277 __ divw(result, dividend, divisor, SetOE, SetRC); |
| 1281 | 1278 |
| 1282 // Check for x / 0. | 1279 // Check for x / 0. |
| 1283 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { | 1280 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { |
| 1284 __ cmpwi(divisor, Operand::Zero()); | 1281 __ cmpwi(divisor, Operand::Zero()); |
| 1285 DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero); | 1282 DeoptimizeIf(eq, instr, DeoptimizeReason::kDivisionByZero); |
| 1286 } | 1283 } |
| 1287 | 1284 |
| 1288 // Check for (0 / -x) that will produce negative zero. | 1285 // Check for (0 / -x) that will produce negative zero. |
| 1289 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1286 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 1290 Label dividend_not_zero; | 1287 Label dividend_not_zero; |
| 1291 __ cmpwi(dividend, Operand::Zero()); | 1288 __ cmpwi(dividend, Operand::Zero()); |
| 1292 __ bne(&dividend_not_zero); | 1289 __ bne(&dividend_not_zero); |
| 1293 __ cmpwi(divisor, Operand::Zero()); | 1290 __ cmpwi(divisor, Operand::Zero()); |
| 1294 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero); | 1291 DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero); |
| 1295 __ bind(&dividend_not_zero); | 1292 __ bind(&dividend_not_zero); |
| 1296 } | 1293 } |
| 1297 | 1294 |
| 1298 // Check for (kMinInt / -1). | 1295 // Check for (kMinInt / -1). |
| 1299 if (can_overflow) { | 1296 if (can_overflow) { |
| 1300 if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { | 1297 if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { |
| 1301 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow, cr0); | 1298 DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow, cr0); |
| 1302 } else { | 1299 } else { |
| 1303 // When truncating, we want kMinInt / -1 = kMinInt. | 1300 // When truncating, we want kMinInt / -1 = kMinInt. |
| 1304 if (CpuFeatures::IsSupported(ISELECT)) { | 1301 if (CpuFeatures::IsSupported(ISELECT)) { |
| 1305 __ isel(overflow, result, dividend, result, cr0); | 1302 __ isel(overflow, result, dividend, result, cr0); |
| 1306 } else { | 1303 } else { |
| 1307 Label no_overflow_possible; | 1304 Label no_overflow_possible; |
| 1308 __ bnooverflow(&no_overflow_possible, cr0); | 1305 __ bnooverflow(&no_overflow_possible, cr0); |
| 1309 __ mr(result, dividend); | 1306 __ mr(result, dividend); |
| 1310 __ bind(&no_overflow_possible); | 1307 __ bind(&no_overflow_possible); |
| 1311 } | 1308 } |
| (...skipping 57 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1369 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero); | 1366 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero); |
| 1370 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); | 1367 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); |
| 1371 | 1368 |
| 1372 if (right_op->IsConstantOperand()) { | 1369 if (right_op->IsConstantOperand()) { |
| 1373 int32_t constant = ToInteger32(LConstantOperand::cast(right_op)); | 1370 int32_t constant = ToInteger32(LConstantOperand::cast(right_op)); |
| 1374 | 1371 |
| 1375 if (bailout_on_minus_zero && (constant < 0)) { | 1372 if (bailout_on_minus_zero && (constant < 0)) { |
| 1376 // The case of a null constant will be handled separately. | 1373 // The case of a null constant will be handled separately. |
| 1377 // If constant is negative and left is null, the result should be -0. | 1374 // If constant is negative and left is null, the result should be -0. |
| 1378 __ cmpi(left, Operand::Zero()); | 1375 __ cmpi(left, Operand::Zero()); |
| 1379 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero); | 1376 DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero); |
| 1380 } | 1377 } |
| 1381 | 1378 |
| 1382 switch (constant) { | 1379 switch (constant) { |
| 1383 case -1: | 1380 case -1: |
| 1384 if (can_overflow) { | 1381 if (can_overflow) { |
| 1385 #if V8_TARGET_ARCH_PPC64 | 1382 #if V8_TARGET_ARCH_PPC64 |
| 1386 if (instr->hydrogen()->representation().IsSmi()) { | 1383 if (instr->hydrogen()->representation().IsSmi()) { |
| 1387 #endif | 1384 #endif |
| 1388 __ li(r0, Operand::Zero()); // clear xer | 1385 __ li(r0, Operand::Zero()); // clear xer |
| 1389 __ mtxer(r0); | 1386 __ mtxer(r0); |
| 1390 __ neg(result, left, SetOE, SetRC); | 1387 __ neg(result, left, SetOE, SetRC); |
| 1391 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow, cr0); | 1388 DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow, cr0); |
| 1392 #if V8_TARGET_ARCH_PPC64 | 1389 #if V8_TARGET_ARCH_PPC64 |
| 1393 } else { | 1390 } else { |
| 1394 __ neg(result, left); | 1391 __ neg(result, left); |
| 1395 __ TestIfInt32(result, r0); | 1392 __ TestIfInt32(result, r0); |
| 1396 DeoptimizeIf(ne, instr, Deoptimizer::kOverflow); | 1393 DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow); |
| 1397 } | 1394 } |
| 1398 #endif | 1395 #endif |
| 1399 } else { | 1396 } else { |
| 1400 __ neg(result, left); | 1397 __ neg(result, left); |
| 1401 } | 1398 } |
| 1402 break; | 1399 break; |
| 1403 case 0: | 1400 case 0: |
| 1404 if (bailout_on_minus_zero) { | 1401 if (bailout_on_minus_zero) { |
| 1405 // If left is strictly negative and the constant is null, the | 1402 // If left is strictly negative and the constant is null, the |
| 1406 // result is -0. Deoptimize if required, otherwise return 0. | 1403 // result is -0. Deoptimize if required, otherwise return 0. |
| 1407 #if V8_TARGET_ARCH_PPC64 | 1404 #if V8_TARGET_ARCH_PPC64 |
| 1408 if (instr->hydrogen()->representation().IsSmi()) { | 1405 if (instr->hydrogen()->representation().IsSmi()) { |
| 1409 #endif | 1406 #endif |
| 1410 __ cmpi(left, Operand::Zero()); | 1407 __ cmpi(left, Operand::Zero()); |
| 1411 #if V8_TARGET_ARCH_PPC64 | 1408 #if V8_TARGET_ARCH_PPC64 |
| 1412 } else { | 1409 } else { |
| 1413 __ cmpwi(left, Operand::Zero()); | 1410 __ cmpwi(left, Operand::Zero()); |
| 1414 } | 1411 } |
| 1415 #endif | 1412 #endif |
| 1416 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero); | 1413 DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero); |
| 1417 } | 1414 } |
| 1418 __ li(result, Operand::Zero()); | 1415 __ li(result, Operand::Zero()); |
| 1419 break; | 1416 break; |
| 1420 case 1: | 1417 case 1: |
| 1421 __ Move(result, left); | 1418 __ Move(result, left); |
| 1422 break; | 1419 break; |
| 1423 default: | 1420 default: |
| 1424 // Multiplying by powers of two and powers of two plus or minus | 1421 // Multiplying by powers of two and powers of two plus or minus |
| 1425 // one can be done faster with shifted operands. | 1422 // one can be done faster with shifted operands. |
| 1426 // For other constants we emit standard code. | 1423 // For other constants we emit standard code. |
| (...skipping 32 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1459 #if V8_TARGET_ARCH_PPC64 | 1456 #if V8_TARGET_ARCH_PPC64 |
| 1460 // result = left * right. | 1457 // result = left * right. |
| 1461 if (instr->hydrogen()->representation().IsSmi()) { | 1458 if (instr->hydrogen()->representation().IsSmi()) { |
| 1462 __ SmiUntag(result, left); | 1459 __ SmiUntag(result, left); |
| 1463 __ SmiUntag(scratch, right); | 1460 __ SmiUntag(scratch, right); |
| 1464 __ Mul(result, result, scratch); | 1461 __ Mul(result, result, scratch); |
| 1465 } else { | 1462 } else { |
| 1466 __ Mul(result, left, right); | 1463 __ Mul(result, left, right); |
| 1467 } | 1464 } |
| 1468 __ TestIfInt32(result, r0); | 1465 __ TestIfInt32(result, r0); |
| 1469 DeoptimizeIf(ne, instr, Deoptimizer::kOverflow); | 1466 DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow); |
| 1470 if (instr->hydrogen()->representation().IsSmi()) { | 1467 if (instr->hydrogen()->representation().IsSmi()) { |
| 1471 __ SmiTag(result); | 1468 __ SmiTag(result); |
| 1472 } | 1469 } |
| 1473 #else | 1470 #else |
| 1474 // scratch:result = left * right. | 1471 // scratch:result = left * right. |
| 1475 if (instr->hydrogen()->representation().IsSmi()) { | 1472 if (instr->hydrogen()->representation().IsSmi()) { |
| 1476 __ SmiUntag(result, left); | 1473 __ SmiUntag(result, left); |
| 1477 __ mulhw(scratch, result, right); | 1474 __ mulhw(scratch, result, right); |
| 1478 __ mullw(result, result, right); | 1475 __ mullw(result, result, right); |
| 1479 } else { | 1476 } else { |
| 1480 __ mulhw(scratch, left, right); | 1477 __ mulhw(scratch, left, right); |
| 1481 __ mullw(result, left, right); | 1478 __ mullw(result, left, right); |
| 1482 } | 1479 } |
| 1483 __ TestIfInt32(scratch, result, r0); | 1480 __ TestIfInt32(scratch, result, r0); |
| 1484 DeoptimizeIf(ne, instr, Deoptimizer::kOverflow); | 1481 DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow); |
| 1485 #endif | 1482 #endif |
| 1486 } else { | 1483 } else { |
| 1487 if (instr->hydrogen()->representation().IsSmi()) { | 1484 if (instr->hydrogen()->representation().IsSmi()) { |
| 1488 __ SmiUntag(result, left); | 1485 __ SmiUntag(result, left); |
| 1489 __ Mul(result, result, right); | 1486 __ Mul(result, result, right); |
| 1490 } else { | 1487 } else { |
| 1491 __ Mul(result, left, right); | 1488 __ Mul(result, left, right); |
| 1492 } | 1489 } |
| 1493 } | 1490 } |
| 1494 | 1491 |
| 1495 if (bailout_on_minus_zero) { | 1492 if (bailout_on_minus_zero) { |
| 1496 Label done; | 1493 Label done; |
| 1497 #if V8_TARGET_ARCH_PPC64 | 1494 #if V8_TARGET_ARCH_PPC64 |
| 1498 if (instr->hydrogen()->representation().IsSmi()) { | 1495 if (instr->hydrogen()->representation().IsSmi()) { |
| 1499 #endif | 1496 #endif |
| 1500 __ xor_(r0, left, right, SetRC); | 1497 __ xor_(r0, left, right, SetRC); |
| 1501 __ bge(&done, cr0); | 1498 __ bge(&done, cr0); |
| 1502 #if V8_TARGET_ARCH_PPC64 | 1499 #if V8_TARGET_ARCH_PPC64 |
| 1503 } else { | 1500 } else { |
| 1504 __ xor_(r0, left, right); | 1501 __ xor_(r0, left, right); |
| 1505 __ cmpwi(r0, Operand::Zero()); | 1502 __ cmpwi(r0, Operand::Zero()); |
| 1506 __ bge(&done); | 1503 __ bge(&done); |
| 1507 } | 1504 } |
| 1508 #endif | 1505 #endif |
| 1509 // Bail out if the result is minus zero. | 1506 // Bail out if the result is minus zero. |
| 1510 __ cmpi(result, Operand::Zero()); | 1507 __ cmpi(result, Operand::Zero()); |
| 1511 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero); | 1508 DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero); |
| 1512 __ bind(&done); | 1509 __ bind(&done); |
| 1513 } | 1510 } |
| 1514 } | 1511 } |
| 1515 } | 1512 } |
| 1516 | 1513 |
| 1517 | 1514 |
| 1518 void LCodeGen::DoBitI(LBitI* instr) { | 1515 void LCodeGen::DoBitI(LBitI* instr) { |
| 1519 LOperand* left_op = instr->left(); | 1516 LOperand* left_op = instr->left(); |
| 1520 LOperand* right_op = instr->right(); | 1517 LOperand* right_op = instr->right(); |
| 1521 DCHECK(left_op->IsRegister()); | 1518 DCHECK(left_op->IsRegister()); |
| (...skipping 65 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1587 break; | 1584 break; |
| 1588 case Token::SAR: | 1585 case Token::SAR: |
| 1589 __ sraw(result, left, scratch); | 1586 __ sraw(result, left, scratch); |
| 1590 break; | 1587 break; |
| 1591 case Token::SHR: | 1588 case Token::SHR: |
| 1592 if (instr->can_deopt()) { | 1589 if (instr->can_deopt()) { |
| 1593 __ srw(result, left, scratch, SetRC); | 1590 __ srw(result, left, scratch, SetRC); |
| 1594 #if V8_TARGET_ARCH_PPC64 | 1591 #if V8_TARGET_ARCH_PPC64 |
| 1595 __ extsw(result, result, SetRC); | 1592 __ extsw(result, result, SetRC); |
| 1596 #endif | 1593 #endif |
| 1597 DeoptimizeIf(lt, instr, Deoptimizer::kNegativeValue, cr0); | 1594 DeoptimizeIf(lt, instr, DeoptimizeReason::kNegativeValue, cr0); |
| 1598 } else { | 1595 } else { |
| 1599 __ srw(result, left, scratch); | 1596 __ srw(result, left, scratch); |
| 1600 } | 1597 } |
| 1601 break; | 1598 break; |
| 1602 case Token::SHL: | 1599 case Token::SHL: |
| 1603 __ slw(result, left, scratch); | 1600 __ slw(result, left, scratch); |
| 1604 #if V8_TARGET_ARCH_PPC64 | 1601 #if V8_TARGET_ARCH_PPC64 |
| 1605 __ extsw(result, result); | 1602 __ extsw(result, result); |
| 1606 #endif | 1603 #endif |
| 1607 break; | 1604 break; |
| (...skipping 19 matching lines...) Expand all Loading... |
| 1627 } else { | 1624 } else { |
| 1628 __ Move(result, left); | 1625 __ Move(result, left); |
| 1629 } | 1626 } |
| 1630 break; | 1627 break; |
| 1631 case Token::SHR: | 1628 case Token::SHR: |
| 1632 if (shift_count != 0) { | 1629 if (shift_count != 0) { |
| 1633 __ srwi(result, left, Operand(shift_count)); | 1630 __ srwi(result, left, Operand(shift_count)); |
| 1634 } else { | 1631 } else { |
| 1635 if (instr->can_deopt()) { | 1632 if (instr->can_deopt()) { |
| 1636 __ cmpwi(left, Operand::Zero()); | 1633 __ cmpwi(left, Operand::Zero()); |
| 1637 DeoptimizeIf(lt, instr, Deoptimizer::kNegativeValue); | 1634 DeoptimizeIf(lt, instr, DeoptimizeReason::kNegativeValue); |
| 1638 } | 1635 } |
| 1639 __ Move(result, left); | 1636 __ Move(result, left); |
| 1640 } | 1637 } |
| 1641 break; | 1638 break; |
| 1642 case Token::SHL: | 1639 case Token::SHL: |
| 1643 if (shift_count != 0) { | 1640 if (shift_count != 0) { |
| 1644 #if V8_TARGET_ARCH_PPC64 | 1641 #if V8_TARGET_ARCH_PPC64 |
| 1645 if (instr->hydrogen_value()->representation().IsSmi()) { | 1642 if (instr->hydrogen_value()->representation().IsSmi()) { |
| 1646 __ sldi(result, left, Operand(shift_count)); | 1643 __ sldi(result, left, Operand(shift_count)); |
| 1647 #else | 1644 #else |
| 1648 if (instr->hydrogen_value()->representation().IsSmi() && | 1645 if (instr->hydrogen_value()->representation().IsSmi() && |
| 1649 instr->can_deopt()) { | 1646 instr->can_deopt()) { |
| 1650 if (shift_count != 1) { | 1647 if (shift_count != 1) { |
| 1651 __ slwi(result, left, Operand(shift_count - 1)); | 1648 __ slwi(result, left, Operand(shift_count - 1)); |
| 1652 __ SmiTagCheckOverflow(result, result, scratch); | 1649 __ SmiTagCheckOverflow(result, result, scratch); |
| 1653 } else { | 1650 } else { |
| 1654 __ SmiTagCheckOverflow(result, left, scratch); | 1651 __ SmiTagCheckOverflow(result, left, scratch); |
| 1655 } | 1652 } |
| 1656 DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0); | 1653 DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow, cr0); |
| 1657 #endif | 1654 #endif |
| 1658 } else { | 1655 } else { |
| 1659 __ slwi(result, left, Operand(shift_count)); | 1656 __ slwi(result, left, Operand(shift_count)); |
| 1660 #if V8_TARGET_ARCH_PPC64 | 1657 #if V8_TARGET_ARCH_PPC64 |
| 1661 __ extsw(result, result); | 1658 __ extsw(result, result); |
| 1662 #endif | 1659 #endif |
| 1663 } | 1660 } |
| 1664 } else { | 1661 } else { |
| 1665 __ Move(result, left); | 1662 __ Move(result, left); |
| 1666 } | 1663 } |
| (...skipping 18 matching lines...) Expand all Loading... |
| 1685 #endif | 1682 #endif |
| 1686 if (!can_overflow || isInteger) { | 1683 if (!can_overflow || isInteger) { |
| 1687 if (right->IsConstantOperand()) { | 1684 if (right->IsConstantOperand()) { |
| 1688 __ Add(result, left, -(ToOperand(right).immediate()), r0); | 1685 __ Add(result, left, -(ToOperand(right).immediate()), r0); |
| 1689 } else { | 1686 } else { |
| 1690 __ sub(result, left, EmitLoadRegister(right, ip)); | 1687 __ sub(result, left, EmitLoadRegister(right, ip)); |
| 1691 } | 1688 } |
| 1692 #if V8_TARGET_ARCH_PPC64 | 1689 #if V8_TARGET_ARCH_PPC64 |
| 1693 if (can_overflow) { | 1690 if (can_overflow) { |
| 1694 __ TestIfInt32(result, r0); | 1691 __ TestIfInt32(result, r0); |
| 1695 DeoptimizeIf(ne, instr, Deoptimizer::kOverflow); | 1692 DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow); |
| 1696 } | 1693 } |
| 1697 #endif | 1694 #endif |
| 1698 } else { | 1695 } else { |
| 1699 if (right->IsConstantOperand()) { | 1696 if (right->IsConstantOperand()) { |
| 1700 __ AddAndCheckForOverflow(result, left, -(ToOperand(right).immediate()), | 1697 __ AddAndCheckForOverflow(result, left, -(ToOperand(right).immediate()), |
| 1701 scratch0(), r0); | 1698 scratch0(), r0); |
| 1702 } else { | 1699 } else { |
| 1703 __ SubAndCheckForOverflow(result, left, EmitLoadRegister(right, ip), | 1700 __ SubAndCheckForOverflow(result, left, EmitLoadRegister(right, ip), |
| 1704 scratch0(), r0); | 1701 scratch0(), r0); |
| 1705 } | 1702 } |
| 1706 DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0); | 1703 DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow, cr0); |
| 1707 } | 1704 } |
| 1708 } | 1705 } |
| 1709 | 1706 |
| 1710 | 1707 |
| 1711 void LCodeGen::DoRSubI(LRSubI* instr) { | 1708 void LCodeGen::DoRSubI(LRSubI* instr) { |
| 1712 LOperand* left = instr->left(); | 1709 LOperand* left = instr->left(); |
| 1713 LOperand* right = instr->right(); | 1710 LOperand* right = instr->right(); |
| 1714 LOperand* result = instr->result(); | 1711 LOperand* result = instr->result(); |
| 1715 | 1712 |
| 1716 DCHECK(!instr->hydrogen()->CheckFlag(HValue::kCanOverflow) && | 1713 DCHECK(!instr->hydrogen()->CheckFlag(HValue::kCanOverflow) && |
| (...skipping 145 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1862 | 1859 |
| 1863 if (!can_overflow || isInteger) { | 1860 if (!can_overflow || isInteger) { |
| 1864 if (right->IsConstantOperand()) { | 1861 if (right->IsConstantOperand()) { |
| 1865 __ Add(result, left, ToOperand(right).immediate(), r0); | 1862 __ Add(result, left, ToOperand(right).immediate(), r0); |
| 1866 } else { | 1863 } else { |
| 1867 __ add(result, left, EmitLoadRegister(right, ip)); | 1864 __ add(result, left, EmitLoadRegister(right, ip)); |
| 1868 } | 1865 } |
| 1869 #if V8_TARGET_ARCH_PPC64 | 1866 #if V8_TARGET_ARCH_PPC64 |
| 1870 if (can_overflow) { | 1867 if (can_overflow) { |
| 1871 __ TestIfInt32(result, r0); | 1868 __ TestIfInt32(result, r0); |
| 1872 DeoptimizeIf(ne, instr, Deoptimizer::kOverflow); | 1869 DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow); |
| 1873 } | 1870 } |
| 1874 #endif | 1871 #endif |
| 1875 } else { | 1872 } else { |
| 1876 if (right->IsConstantOperand()) { | 1873 if (right->IsConstantOperand()) { |
| 1877 __ AddAndCheckForOverflow(result, left, ToOperand(right).immediate(), | 1874 __ AddAndCheckForOverflow(result, left, ToOperand(right).immediate(), |
| 1878 scratch0(), r0); | 1875 scratch0(), r0); |
| 1879 } else { | 1876 } else { |
| 1880 __ AddAndCheckForOverflow(result, left, EmitLoadRegister(right, ip), | 1877 __ AddAndCheckForOverflow(result, left, EmitLoadRegister(right, ip), |
| 1881 scratch0(), r0); | 1878 scratch0(), r0); |
| 1882 } | 1879 } |
| 1883 DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0); | 1880 DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow, cr0); |
| 1884 } | 1881 } |
| 1885 } | 1882 } |
| 1886 | 1883 |
| 1887 | 1884 |
| 1888 void LCodeGen::DoMathMinMax(LMathMinMax* instr) { | 1885 void LCodeGen::DoMathMinMax(LMathMinMax* instr) { |
| 1889 LOperand* left = instr->left(); | 1886 LOperand* left = instr->left(); |
| 1890 LOperand* right = instr->right(); | 1887 LOperand* right = instr->right(); |
| 1891 HMathMinMax::Operation operation = instr->hydrogen()->operation(); | 1888 HMathMinMax::Operation operation = instr->hydrogen()->operation(); |
| 1892 Condition cond = (operation == HMathMinMax::kMathMin) ? le : ge; | 1889 Condition cond = (operation == HMathMinMax::kMathMin) ? le : ge; |
| 1893 if (instr->hydrogen()->representation().IsSmiOrInteger32()) { | 1890 if (instr->hydrogen()->representation().IsSmiOrInteger32()) { |
| (...skipping 230 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 2124 } | 2121 } |
| 2125 | 2122 |
| 2126 if (expected.Contains(ToBooleanICStub::SMI)) { | 2123 if (expected.Contains(ToBooleanICStub::SMI)) { |
| 2127 // Smis: 0 -> false, all other -> true. | 2124 // Smis: 0 -> false, all other -> true. |
| 2128 __ cmpi(reg, Operand::Zero()); | 2125 __ cmpi(reg, Operand::Zero()); |
| 2129 __ beq(instr->FalseLabel(chunk_)); | 2126 __ beq(instr->FalseLabel(chunk_)); |
| 2130 __ JumpIfSmi(reg, instr->TrueLabel(chunk_)); | 2127 __ JumpIfSmi(reg, instr->TrueLabel(chunk_)); |
| 2131 } else if (expected.NeedsMap()) { | 2128 } else if (expected.NeedsMap()) { |
| 2132 // If we need a map later and have a Smi -> deopt. | 2129 // If we need a map later and have a Smi -> deopt. |
| 2133 __ TestIfSmi(reg, r0); | 2130 __ TestIfSmi(reg, r0); |
| 2134 DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0); | 2131 DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, cr0); |
| 2135 } | 2132 } |
| 2136 | 2133 |
| 2137 const Register map = scratch0(); | 2134 const Register map = scratch0(); |
| 2138 if (expected.NeedsMap()) { | 2135 if (expected.NeedsMap()) { |
| 2139 __ LoadP(map, FieldMemOperand(reg, HeapObject::kMapOffset)); | 2136 __ LoadP(map, FieldMemOperand(reg, HeapObject::kMapOffset)); |
| 2140 | 2137 |
| 2141 if (expected.CanBeUndetectable()) { | 2138 if (expected.CanBeUndetectable()) { |
| 2142 // Undetectable -> false. | 2139 // Undetectable -> false. |
| 2143 __ lbz(ip, FieldMemOperand(map, Map::kBitFieldOffset)); | 2140 __ lbz(ip, FieldMemOperand(map, Map::kBitFieldOffset)); |
| 2144 __ TestBit(ip, Map::kIsUndetectable, r0); | 2141 __ TestBit(ip, Map::kIsUndetectable, r0); |
| (...skipping 43 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 2188 __ mfcr(r0); | 2185 __ mfcr(r0); |
| 2189 __ andi(r0, r0, Operand(crZOrNaNBits)); | 2186 __ andi(r0, r0, Operand(crZOrNaNBits)); |
| 2190 __ bne(instr->FalseLabel(chunk_), cr0); | 2187 __ bne(instr->FalseLabel(chunk_), cr0); |
| 2191 __ b(instr->TrueLabel(chunk_)); | 2188 __ b(instr->TrueLabel(chunk_)); |
| 2192 __ bind(¬_heap_number); | 2189 __ bind(¬_heap_number); |
| 2193 } | 2190 } |
| 2194 | 2191 |
| 2195 if (!expected.IsGeneric()) { | 2192 if (!expected.IsGeneric()) { |
| 2196 // We've seen something for the first time -> deopt. | 2193 // We've seen something for the first time -> deopt. |
| 2197 // This can only happen if we are not generic already. | 2194 // This can only happen if we are not generic already. |
| 2198 DeoptimizeIf(al, instr, Deoptimizer::kUnexpectedObject); | 2195 DeoptimizeIf(al, instr, DeoptimizeReason::kUnexpectedObject); |
| 2199 } | 2196 } |
| 2200 } | 2197 } |
| 2201 } | 2198 } |
| 2202 } | 2199 } |
| 2203 | 2200 |
| 2204 | 2201 |
| 2205 void LCodeGen::EmitGoto(int block) { | 2202 void LCodeGen::EmitGoto(int block) { |
| 2206 if (!IsNextEmittedBlock(block)) { | 2203 if (!IsNextEmittedBlock(block)) { |
| 2207 __ b(chunk_->GetAssemblyLabel(LookupDestination(block))); | 2204 __ b(chunk_->GetAssemblyLabel(LookupDestination(block))); |
| 2208 } | 2205 } |
| (...skipping 363 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 2572 | 2569 |
| 2573 // Loop through the {object}s prototype chain looking for the {prototype}. | 2570 // Loop through the {object}s prototype chain looking for the {prototype}. |
| 2574 __ LoadP(object_map, FieldMemOperand(object, HeapObject::kMapOffset)); | 2571 __ LoadP(object_map, FieldMemOperand(object, HeapObject::kMapOffset)); |
| 2575 Label loop; | 2572 Label loop; |
| 2576 __ bind(&loop); | 2573 __ bind(&loop); |
| 2577 | 2574 |
| 2578 // Deoptimize if the object needs to be access checked. | 2575 // Deoptimize if the object needs to be access checked. |
| 2579 __ lbz(object_instance_type, | 2576 __ lbz(object_instance_type, |
| 2580 FieldMemOperand(object_map, Map::kBitFieldOffset)); | 2577 FieldMemOperand(object_map, Map::kBitFieldOffset)); |
| 2581 __ TestBit(object_instance_type, Map::kIsAccessCheckNeeded, r0); | 2578 __ TestBit(object_instance_type, Map::kIsAccessCheckNeeded, r0); |
| 2582 DeoptimizeIf(ne, instr, Deoptimizer::kAccessCheck, cr0); | 2579 DeoptimizeIf(ne, instr, DeoptimizeReason::kAccessCheck, cr0); |
| 2583 // Deoptimize for proxies. | 2580 // Deoptimize for proxies. |
| 2584 __ CompareInstanceType(object_map, object_instance_type, JS_PROXY_TYPE); | 2581 __ CompareInstanceType(object_map, object_instance_type, JS_PROXY_TYPE); |
| 2585 DeoptimizeIf(eq, instr, Deoptimizer::kProxy); | 2582 DeoptimizeIf(eq, instr, DeoptimizeReason::kProxy); |
| 2586 __ LoadP(object_prototype, | 2583 __ LoadP(object_prototype, |
| 2587 FieldMemOperand(object_map, Map::kPrototypeOffset)); | 2584 FieldMemOperand(object_map, Map::kPrototypeOffset)); |
| 2588 __ CompareRoot(object_prototype, Heap::kNullValueRootIndex); | 2585 __ CompareRoot(object_prototype, Heap::kNullValueRootIndex); |
| 2589 EmitFalseBranch(instr, eq); | 2586 EmitFalseBranch(instr, eq); |
| 2590 __ cmp(object_prototype, prototype); | 2587 __ cmp(object_prototype, prototype); |
| 2591 EmitTrueBranch(instr, eq); | 2588 EmitTrueBranch(instr, eq); |
| 2592 __ LoadP(object_map, | 2589 __ LoadP(object_map, |
| 2593 FieldMemOperand(object_prototype, HeapObject::kMapOffset)); | 2590 FieldMemOperand(object_prototype, HeapObject::kMapOffset)); |
| 2594 __ b(&loop); | 2591 __ b(&loop); |
| 2595 } | 2592 } |
| (...skipping 109 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 2705 | 2702 |
| 2706 | 2703 |
| 2707 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) { | 2704 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) { |
| 2708 Register context = ToRegister(instr->context()); | 2705 Register context = ToRegister(instr->context()); |
| 2709 Register result = ToRegister(instr->result()); | 2706 Register result = ToRegister(instr->result()); |
| 2710 __ LoadP(result, ContextMemOperand(context, instr->slot_index())); | 2707 __ LoadP(result, ContextMemOperand(context, instr->slot_index())); |
| 2711 if (instr->hydrogen()->RequiresHoleCheck()) { | 2708 if (instr->hydrogen()->RequiresHoleCheck()) { |
| 2712 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); | 2709 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); |
| 2713 if (instr->hydrogen()->DeoptimizesOnHole()) { | 2710 if (instr->hydrogen()->DeoptimizesOnHole()) { |
| 2714 __ cmp(result, ip); | 2711 __ cmp(result, ip); |
| 2715 DeoptimizeIf(eq, instr, Deoptimizer::kHole); | 2712 DeoptimizeIf(eq, instr, DeoptimizeReason::kHole); |
| 2716 } else { | 2713 } else { |
| 2717 if (CpuFeatures::IsSupported(ISELECT)) { | 2714 if (CpuFeatures::IsSupported(ISELECT)) { |
| 2718 Register scratch = scratch0(); | 2715 Register scratch = scratch0(); |
| 2719 __ mov(scratch, Operand(factory()->undefined_value())); | 2716 __ mov(scratch, Operand(factory()->undefined_value())); |
| 2720 __ cmp(result, ip); | 2717 __ cmp(result, ip); |
| 2721 __ isel(eq, result, scratch, result); | 2718 __ isel(eq, result, scratch, result); |
| 2722 } else { | 2719 } else { |
| 2723 Label skip; | 2720 Label skip; |
| 2724 __ cmp(result, ip); | 2721 __ cmp(result, ip); |
| 2725 __ bne(&skip); | 2722 __ bne(&skip); |
| (...skipping 11 matching lines...) Expand all Loading... |
| 2737 Register scratch = scratch0(); | 2734 Register scratch = scratch0(); |
| 2738 MemOperand target = ContextMemOperand(context, instr->slot_index()); | 2735 MemOperand target = ContextMemOperand(context, instr->slot_index()); |
| 2739 | 2736 |
| 2740 Label skip_assignment; | 2737 Label skip_assignment; |
| 2741 | 2738 |
| 2742 if (instr->hydrogen()->RequiresHoleCheck()) { | 2739 if (instr->hydrogen()->RequiresHoleCheck()) { |
| 2743 __ LoadP(scratch, target); | 2740 __ LoadP(scratch, target); |
| 2744 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); | 2741 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); |
| 2745 __ cmp(scratch, ip); | 2742 __ cmp(scratch, ip); |
| 2746 if (instr->hydrogen()->DeoptimizesOnHole()) { | 2743 if (instr->hydrogen()->DeoptimizesOnHole()) { |
| 2747 DeoptimizeIf(eq, instr, Deoptimizer::kHole); | 2744 DeoptimizeIf(eq, instr, DeoptimizeReason::kHole); |
| 2748 } else { | 2745 } else { |
| 2749 __ bne(&skip_assignment); | 2746 __ bne(&skip_assignment); |
| 2750 } | 2747 } |
| 2751 } | 2748 } |
| 2752 | 2749 |
| 2753 __ StoreP(value, target, r0); | 2750 __ StoreP(value, target, r0); |
| 2754 if (instr->hydrogen()->NeedsWriteBarrier()) { | 2751 if (instr->hydrogen()->NeedsWriteBarrier()) { |
| 2755 SmiCheck check_needed = instr->hydrogen()->value()->type().IsHeapObject() | 2752 SmiCheck check_needed = instr->hydrogen()->value()->type().IsHeapObject() |
| 2756 ? OMIT_SMI_CHECK | 2753 ? OMIT_SMI_CHECK |
| 2757 : INLINE_SMI_CHECK; | 2754 : INLINE_SMI_CHECK; |
| (...skipping 66 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 2824 Register function = ToRegister(instr->function()); | 2821 Register function = ToRegister(instr->function()); |
| 2825 Register result = ToRegister(instr->result()); | 2822 Register result = ToRegister(instr->result()); |
| 2826 | 2823 |
| 2827 // Get the prototype or initial map from the function. | 2824 // Get the prototype or initial map from the function. |
| 2828 __ LoadP(result, | 2825 __ LoadP(result, |
| 2829 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); | 2826 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); |
| 2830 | 2827 |
| 2831 // Check that the function has a prototype or an initial map. | 2828 // Check that the function has a prototype or an initial map. |
| 2832 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); | 2829 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); |
| 2833 __ cmp(result, ip); | 2830 __ cmp(result, ip); |
| 2834 DeoptimizeIf(eq, instr, Deoptimizer::kHole); | 2831 DeoptimizeIf(eq, instr, DeoptimizeReason::kHole); |
| 2835 | 2832 |
| 2836 // If the function does not have an initial map, we're done. | 2833 // If the function does not have an initial map, we're done. |
| 2837 if (CpuFeatures::IsSupported(ISELECT)) { | 2834 if (CpuFeatures::IsSupported(ISELECT)) { |
| 2838 // Get the prototype from the initial map (optimistic). | 2835 // Get the prototype from the initial map (optimistic). |
| 2839 __ LoadP(ip, FieldMemOperand(result, Map::kPrototypeOffset)); | 2836 __ LoadP(ip, FieldMemOperand(result, Map::kPrototypeOffset)); |
| 2840 __ CompareObjectType(result, scratch, scratch, MAP_TYPE); | 2837 __ CompareObjectType(result, scratch, scratch, MAP_TYPE); |
| 2841 __ isel(eq, result, ip, result); | 2838 __ isel(eq, result, ip, result); |
| 2842 } else { | 2839 } else { |
| 2843 Label done; | 2840 Label done; |
| 2844 __ CompareObjectType(result, scratch, scratch, MAP_TYPE); | 2841 __ CompareObjectType(result, scratch, scratch, MAP_TYPE); |
| (...skipping 131 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 2976 break; | 2973 break; |
| 2977 case UINT32_ELEMENTS: | 2974 case UINT32_ELEMENTS: |
| 2978 if (key_is_constant) { | 2975 if (key_is_constant) { |
| 2979 __ LoadWord(result, mem_operand, r0); | 2976 __ LoadWord(result, mem_operand, r0); |
| 2980 } else { | 2977 } else { |
| 2981 __ lwzx(result, mem_operand); | 2978 __ lwzx(result, mem_operand); |
| 2982 } | 2979 } |
| 2983 if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) { | 2980 if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) { |
| 2984 __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000))); | 2981 __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000))); |
| 2985 __ cmplw(result, r0); | 2982 __ cmplw(result, r0); |
| 2986 DeoptimizeIf(ge, instr, Deoptimizer::kNegativeValue); | 2983 DeoptimizeIf(ge, instr, DeoptimizeReason::kNegativeValue); |
| 2987 } | 2984 } |
| 2988 break; | 2985 break; |
| 2989 case FLOAT32_ELEMENTS: | 2986 case FLOAT32_ELEMENTS: |
| 2990 case FLOAT64_ELEMENTS: | 2987 case FLOAT64_ELEMENTS: |
| 2991 case FAST_HOLEY_DOUBLE_ELEMENTS: | 2988 case FAST_HOLEY_DOUBLE_ELEMENTS: |
| 2992 case FAST_HOLEY_ELEMENTS: | 2989 case FAST_HOLEY_ELEMENTS: |
| 2993 case FAST_HOLEY_SMI_ELEMENTS: | 2990 case FAST_HOLEY_SMI_ELEMENTS: |
| 2994 case FAST_DOUBLE_ELEMENTS: | 2991 case FAST_DOUBLE_ELEMENTS: |
| 2995 case FAST_ELEMENTS: | 2992 case FAST_ELEMENTS: |
| 2996 case FAST_SMI_ELEMENTS: | 2993 case FAST_SMI_ELEMENTS: |
| (...skipping 44 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 3041 | 3038 |
| 3042 if (instr->hydrogen()->RequiresHoleCheck()) { | 3039 if (instr->hydrogen()->RequiresHoleCheck()) { |
| 3043 if (is_int16(base_offset + Register::kExponentOffset)) { | 3040 if (is_int16(base_offset + Register::kExponentOffset)) { |
| 3044 __ lwz(scratch, | 3041 __ lwz(scratch, |
| 3045 MemOperand(elements, base_offset + Register::kExponentOffset)); | 3042 MemOperand(elements, base_offset + Register::kExponentOffset)); |
| 3046 } else { | 3043 } else { |
| 3047 __ addi(scratch, elements, Operand(base_offset)); | 3044 __ addi(scratch, elements, Operand(base_offset)); |
| 3048 __ lwz(scratch, MemOperand(scratch, Register::kExponentOffset)); | 3045 __ lwz(scratch, MemOperand(scratch, Register::kExponentOffset)); |
| 3049 } | 3046 } |
| 3050 __ Cmpi(scratch, Operand(kHoleNanUpper32), r0); | 3047 __ Cmpi(scratch, Operand(kHoleNanUpper32), r0); |
| 3051 DeoptimizeIf(eq, instr, Deoptimizer::kHole); | 3048 DeoptimizeIf(eq, instr, DeoptimizeReason::kHole); |
| 3052 } | 3049 } |
| 3053 } | 3050 } |
| 3054 | 3051 |
| 3055 | 3052 |
| 3056 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) { | 3053 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) { |
| 3057 HLoadKeyed* hinstr = instr->hydrogen(); | 3054 HLoadKeyed* hinstr = instr->hydrogen(); |
| 3058 Register elements = ToRegister(instr->elements()); | 3055 Register elements = ToRegister(instr->elements()); |
| 3059 Register result = ToRegister(instr->result()); | 3056 Register result = ToRegister(instr->result()); |
| 3060 Register scratch = scratch0(); | 3057 Register scratch = scratch0(); |
| 3061 Register store_base = scratch; | 3058 Register store_base = scratch; |
| (...skipping 30 matching lines...) Expand all Loading... |
| 3092 } | 3089 } |
| 3093 #endif | 3090 #endif |
| 3094 | 3091 |
| 3095 __ LoadRepresentation(result, MemOperand(store_base, offset), representation, | 3092 __ LoadRepresentation(result, MemOperand(store_base, offset), representation, |
| 3096 r0); | 3093 r0); |
| 3097 | 3094 |
| 3098 // Check for the hole value. | 3095 // Check for the hole value. |
| 3099 if (requires_hole_check) { | 3096 if (requires_hole_check) { |
| 3100 if (IsFastSmiElementsKind(hinstr->elements_kind())) { | 3097 if (IsFastSmiElementsKind(hinstr->elements_kind())) { |
| 3101 __ TestIfSmi(result, r0); | 3098 __ TestIfSmi(result, r0); |
| 3102 DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, cr0); | 3099 DeoptimizeIf(ne, instr, DeoptimizeReason::kNotASmi, cr0); |
| 3103 } else { | 3100 } else { |
| 3104 __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex); | 3101 __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex); |
| 3105 __ cmp(result, scratch); | 3102 __ cmp(result, scratch); |
| 3106 DeoptimizeIf(eq, instr, Deoptimizer::kHole); | 3103 DeoptimizeIf(eq, instr, DeoptimizeReason::kHole); |
| 3107 } | 3104 } |
| 3108 } else if (instr->hydrogen()->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) { | 3105 } else if (instr->hydrogen()->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) { |
| 3109 DCHECK(instr->hydrogen()->elements_kind() == FAST_HOLEY_ELEMENTS); | 3106 DCHECK(instr->hydrogen()->elements_kind() == FAST_HOLEY_ELEMENTS); |
| 3110 Label done; | 3107 Label done; |
| 3111 __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex); | 3108 __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex); |
| 3112 __ cmp(result, scratch); | 3109 __ cmp(result, scratch); |
| 3113 __ bne(&done); | 3110 __ bne(&done); |
| 3114 if (info()->IsStub()) { | 3111 if (info()->IsStub()) { |
| 3115 // A stub can safely convert the hole to undefined only if the array | 3112 // A stub can safely convert the hole to undefined only if the array |
| 3116 // protector cell contains (Smi) Isolate::kArrayProtectorValid. Otherwise | 3113 // protector cell contains (Smi) Isolate::kArrayProtectorValid. Otherwise |
| 3117 // it needs to bail out. | 3114 // it needs to bail out. |
| 3118 __ LoadRoot(result, Heap::kArrayProtectorRootIndex); | 3115 __ LoadRoot(result, Heap::kArrayProtectorRootIndex); |
| 3119 __ LoadP(result, FieldMemOperand(result, Cell::kValueOffset)); | 3116 __ LoadP(result, FieldMemOperand(result, Cell::kValueOffset)); |
| 3120 __ CmpSmiLiteral(result, Smi::FromInt(Isolate::kArrayProtectorValid), r0); | 3117 __ CmpSmiLiteral(result, Smi::FromInt(Isolate::kArrayProtectorValid), r0); |
| 3121 DeoptimizeIf(ne, instr, Deoptimizer::kHole); | 3118 DeoptimizeIf(ne, instr, DeoptimizeReason::kHole); |
| 3122 } | 3119 } |
| 3123 __ LoadRoot(result, Heap::kUndefinedValueRootIndex); | 3120 __ LoadRoot(result, Heap::kUndefinedValueRootIndex); |
| 3124 __ bind(&done); | 3121 __ bind(&done); |
| 3125 } | 3122 } |
| 3126 } | 3123 } |
| 3127 | 3124 |
| 3128 | 3125 |
| 3129 void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) { | 3126 void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) { |
| 3130 if (instr->is_fixed_typed_array()) { | 3127 if (instr->is_fixed_typed_array()) { |
| 3131 DoLoadKeyedExternalArray(instr); | 3128 DoLoadKeyedExternalArray(instr); |
| (...skipping 129 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 3261 // Normal function. Replace undefined or null with global receiver. | 3258 // Normal function. Replace undefined or null with global receiver. |
| 3262 __ LoadRoot(scratch, Heap::kNullValueRootIndex); | 3259 __ LoadRoot(scratch, Heap::kNullValueRootIndex); |
| 3263 __ cmp(receiver, scratch); | 3260 __ cmp(receiver, scratch); |
| 3264 __ beq(&global_object); | 3261 __ beq(&global_object); |
| 3265 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex); | 3262 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex); |
| 3266 __ cmp(receiver, scratch); | 3263 __ cmp(receiver, scratch); |
| 3267 __ beq(&global_object); | 3264 __ beq(&global_object); |
| 3268 | 3265 |
| 3269 // Deoptimize if the receiver is not a JS object. | 3266 // Deoptimize if the receiver is not a JS object. |
| 3270 __ TestIfSmi(receiver, r0); | 3267 __ TestIfSmi(receiver, r0); |
| 3271 DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0); | 3268 DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, cr0); |
| 3272 __ CompareObjectType(receiver, scratch, scratch, FIRST_JS_RECEIVER_TYPE); | 3269 __ CompareObjectType(receiver, scratch, scratch, FIRST_JS_RECEIVER_TYPE); |
| 3273 DeoptimizeIf(lt, instr, Deoptimizer::kNotAJavaScriptObject); | 3270 DeoptimizeIf(lt, instr, DeoptimizeReason::kNotAJavaScriptObject); |
| 3274 | 3271 |
| 3275 __ b(&result_in_receiver); | 3272 __ b(&result_in_receiver); |
| 3276 __ bind(&global_object); | 3273 __ bind(&global_object); |
| 3277 __ LoadP(result, FieldMemOperand(function, JSFunction::kContextOffset)); | 3274 __ LoadP(result, FieldMemOperand(function, JSFunction::kContextOffset)); |
| 3278 __ LoadP(result, ContextMemOperand(result, Context::NATIVE_CONTEXT_INDEX)); | 3275 __ LoadP(result, ContextMemOperand(result, Context::NATIVE_CONTEXT_INDEX)); |
| 3279 __ LoadP(result, ContextMemOperand(result, Context::GLOBAL_PROXY_INDEX)); | 3276 __ LoadP(result, ContextMemOperand(result, Context::GLOBAL_PROXY_INDEX)); |
| 3280 | 3277 |
| 3281 if (result.is(receiver)) { | 3278 if (result.is(receiver)) { |
| 3282 __ bind(&result_in_receiver); | 3279 __ bind(&result_in_receiver); |
| 3283 } else { | 3280 } else { |
| (...skipping 13 matching lines...) Expand all Loading... |
| 3297 Register elements = ToRegister(instr->elements()); | 3294 Register elements = ToRegister(instr->elements()); |
| 3298 Register scratch = scratch0(); | 3295 Register scratch = scratch0(); |
| 3299 DCHECK(receiver.is(r3)); // Used for parameter count. | 3296 DCHECK(receiver.is(r3)); // Used for parameter count. |
| 3300 DCHECK(function.is(r4)); // Required by InvokeFunction. | 3297 DCHECK(function.is(r4)); // Required by InvokeFunction. |
| 3301 DCHECK(ToRegister(instr->result()).is(r3)); | 3298 DCHECK(ToRegister(instr->result()).is(r3)); |
| 3302 | 3299 |
| 3303 // Copy the arguments to this function possibly from the | 3300 // Copy the arguments to this function possibly from the |
| 3304 // adaptor frame below it. | 3301 // adaptor frame below it. |
| 3305 const uint32_t kArgumentsLimit = 1 * KB; | 3302 const uint32_t kArgumentsLimit = 1 * KB; |
| 3306 __ cmpli(length, Operand(kArgumentsLimit)); | 3303 __ cmpli(length, Operand(kArgumentsLimit)); |
| 3307 DeoptimizeIf(gt, instr, Deoptimizer::kTooManyArguments); | 3304 DeoptimizeIf(gt, instr, DeoptimizeReason::kTooManyArguments); |
| 3308 | 3305 |
| 3309 // Push the receiver and use the register to keep the original | 3306 // Push the receiver and use the register to keep the original |
| 3310 // number of arguments. | 3307 // number of arguments. |
| 3311 __ push(receiver); | 3308 __ push(receiver); |
| 3312 __ mr(receiver, length); | 3309 __ mr(receiver, length); |
| 3313 // The arguments are at a one pointer size offset from elements. | 3310 // The arguments are at a one pointer size offset from elements. |
| 3314 __ addi(elements, elements, Operand(1 * kPointerSize)); | 3311 __ addi(elements, elements, Operand(1 * kPointerSize)); |
| 3315 | 3312 |
| 3316 // Loop through the arguments pushing them onto the execution | 3313 // Loop through the arguments pushing them onto the execution |
| 3317 // stack. | 3314 // stack. |
| (...skipping 133 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 3451 DCHECK(instr->context() != NULL); | 3448 DCHECK(instr->context() != NULL); |
| 3452 DCHECK(ToRegister(instr->context()).is(cp)); | 3449 DCHECK(ToRegister(instr->context()).is(cp)); |
| 3453 Register input = ToRegister(instr->value()); | 3450 Register input = ToRegister(instr->value()); |
| 3454 Register result = ToRegister(instr->result()); | 3451 Register result = ToRegister(instr->result()); |
| 3455 Register scratch = scratch0(); | 3452 Register scratch = scratch0(); |
| 3456 | 3453 |
| 3457 // Deoptimize if not a heap number. | 3454 // Deoptimize if not a heap number. |
| 3458 __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); | 3455 __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); |
| 3459 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); | 3456 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); |
| 3460 __ cmp(scratch, ip); | 3457 __ cmp(scratch, ip); |
| 3461 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber); | 3458 DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber); |
| 3462 | 3459 |
| 3463 Label done; | 3460 Label done; |
| 3464 Register exponent = scratch0(); | 3461 Register exponent = scratch0(); |
| 3465 scratch = no_reg; | 3462 scratch = no_reg; |
| 3466 __ lwz(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset)); | 3463 __ lwz(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset)); |
| 3467 // Check the sign of the argument. If the argument is positive, just | 3464 // Check the sign of the argument. If the argument is positive, just |
| 3468 // return it. | 3465 // return it. |
| 3469 __ cmpwi(exponent, Operand::Zero()); | 3466 __ cmpwi(exponent, Operand::Zero()); |
| 3470 // Move the input to the result if necessary. | 3467 // Move the input to the result if necessary. |
| 3471 __ Move(result, input); | 3468 __ Move(result, input); |
| (...skipping 49 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 3521 Register input = ToRegister(instr->value()); | 3518 Register input = ToRegister(instr->value()); |
| 3522 Register result = ToRegister(instr->result()); | 3519 Register result = ToRegister(instr->result()); |
| 3523 Label done; | 3520 Label done; |
| 3524 __ cmpi(input, Operand::Zero()); | 3521 __ cmpi(input, Operand::Zero()); |
| 3525 __ Move(result, input); | 3522 __ Move(result, input); |
| 3526 __ bge(&done); | 3523 __ bge(&done); |
| 3527 __ li(r0, Operand::Zero()); // clear xer | 3524 __ li(r0, Operand::Zero()); // clear xer |
| 3528 __ mtxer(r0); | 3525 __ mtxer(r0); |
| 3529 __ neg(result, result, SetOE, SetRC); | 3526 __ neg(result, result, SetOE, SetRC); |
| 3530 // Deoptimize on overflow. | 3527 // Deoptimize on overflow. |
| 3531 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow, cr0); | 3528 DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow, cr0); |
| 3532 __ bind(&done); | 3529 __ bind(&done); |
| 3533 } | 3530 } |
| 3534 | 3531 |
| 3535 | 3532 |
| 3536 #if V8_TARGET_ARCH_PPC64 | 3533 #if V8_TARGET_ARCH_PPC64 |
| 3537 void LCodeGen::EmitInteger32MathAbs(LMathAbs* instr) { | 3534 void LCodeGen::EmitInteger32MathAbs(LMathAbs* instr) { |
| 3538 Register input = ToRegister(instr->value()); | 3535 Register input = ToRegister(instr->value()); |
| 3539 Register result = ToRegister(instr->result()); | 3536 Register result = ToRegister(instr->result()); |
| 3540 Label done; | 3537 Label done; |
| 3541 __ cmpwi(input, Operand::Zero()); | 3538 __ cmpwi(input, Operand::Zero()); |
| 3542 __ Move(result, input); | 3539 __ Move(result, input); |
| 3543 __ bge(&done); | 3540 __ bge(&done); |
| 3544 | 3541 |
| 3545 // Deoptimize on overflow. | 3542 // Deoptimize on overflow. |
| 3546 __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000))); | 3543 __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000))); |
| 3547 __ cmpw(input, r0); | 3544 __ cmpw(input, r0); |
| 3548 DeoptimizeIf(eq, instr, Deoptimizer::kOverflow); | 3545 DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow); |
| 3549 | 3546 |
| 3550 __ neg(result, result); | 3547 __ neg(result, result); |
| 3551 __ bind(&done); | 3548 __ bind(&done); |
| 3552 } | 3549 } |
| 3553 #endif | 3550 #endif |
| 3554 | 3551 |
| 3555 | 3552 |
| 3556 void LCodeGen::DoMathAbs(LMathAbs* instr) { | 3553 void LCodeGen::DoMathAbs(LMathAbs* instr) { |
| 3557 // Class for deferred case. | 3554 // Class for deferred case. |
| 3558 class DeferredMathAbsTaggedHeapNumber final : public LDeferredCode { | 3555 class DeferredMathAbsTaggedHeapNumber final : public LDeferredCode { |
| (...skipping 43 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 3602 | 3599 |
| 3603 void LCodeGen::DoMathFloorI(LMathFloorI* instr) { | 3600 void LCodeGen::DoMathFloorI(LMathFloorI* instr) { |
| 3604 DoubleRegister input = ToDoubleRegister(instr->value()); | 3601 DoubleRegister input = ToDoubleRegister(instr->value()); |
| 3605 Register result = ToRegister(instr->result()); | 3602 Register result = ToRegister(instr->result()); |
| 3606 Register input_high = scratch0(); | 3603 Register input_high = scratch0(); |
| 3607 Register scratch = ip; | 3604 Register scratch = ip; |
| 3608 Label done, exact; | 3605 Label done, exact; |
| 3609 | 3606 |
| 3610 __ TryInt32Floor(result, input, input_high, scratch, double_scratch0(), &done, | 3607 __ TryInt32Floor(result, input, input_high, scratch, double_scratch0(), &done, |
| 3611 &exact); | 3608 &exact); |
| 3612 DeoptimizeIf(al, instr, Deoptimizer::kLostPrecisionOrNaN); | 3609 DeoptimizeIf(al, instr, DeoptimizeReason::kLostPrecisionOrNaN); |
| 3613 | 3610 |
| 3614 __ bind(&exact); | 3611 __ bind(&exact); |
| 3615 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 3612 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 3616 // Test for -0. | 3613 // Test for -0. |
| 3617 __ cmpi(result, Operand::Zero()); | 3614 __ cmpi(result, Operand::Zero()); |
| 3618 __ bne(&done); | 3615 __ bne(&done); |
| 3619 __ cmpwi(input_high, Operand::Zero()); | 3616 __ cmpwi(input_high, Operand::Zero()); |
| 3620 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero); | 3617 DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero); |
| 3621 } | 3618 } |
| 3622 __ bind(&done); | 3619 __ bind(&done); |
| 3623 } | 3620 } |
| 3624 | 3621 |
| 3625 void LCodeGen::DoMathRoundD(LMathRoundD* instr) { | 3622 void LCodeGen::DoMathRoundD(LMathRoundD* instr) { |
| 3626 DoubleRegister input_reg = ToDoubleRegister(instr->value()); | 3623 DoubleRegister input_reg = ToDoubleRegister(instr->value()); |
| 3627 DoubleRegister output_reg = ToDoubleRegister(instr->result()); | 3624 DoubleRegister output_reg = ToDoubleRegister(instr->result()); |
| 3628 DoubleRegister dot_five = double_scratch0(); | 3625 DoubleRegister dot_five = double_scratch0(); |
| 3629 Label done; | 3626 Label done; |
| 3630 | 3627 |
| (...skipping 20 matching lines...) Expand all Loading... |
| 3651 DoubleRegister double_scratch1 = ToDoubleRegister(instr->temp()); | 3648 DoubleRegister double_scratch1 = ToDoubleRegister(instr->temp()); |
| 3652 DoubleRegister input_plus_dot_five = double_scratch1; | 3649 DoubleRegister input_plus_dot_five = double_scratch1; |
| 3653 Register scratch1 = scratch0(); | 3650 Register scratch1 = scratch0(); |
| 3654 Register scratch2 = ip; | 3651 Register scratch2 = ip; |
| 3655 DoubleRegister dot_five = double_scratch0(); | 3652 DoubleRegister dot_five = double_scratch0(); |
| 3656 Label convert, done; | 3653 Label convert, done; |
| 3657 | 3654 |
| 3658 __ LoadDoubleLiteral(dot_five, 0.5, r0); | 3655 __ LoadDoubleLiteral(dot_five, 0.5, r0); |
| 3659 __ fabs(double_scratch1, input); | 3656 __ fabs(double_scratch1, input); |
| 3660 __ fcmpu(double_scratch1, dot_five); | 3657 __ fcmpu(double_scratch1, dot_five); |
| 3661 DeoptimizeIf(unordered, instr, Deoptimizer::kLostPrecisionOrNaN); | 3658 DeoptimizeIf(unordered, instr, DeoptimizeReason::kLostPrecisionOrNaN); |
| 3662 // If input is in [-0.5, -0], the result is -0. | 3659 // If input is in [-0.5, -0], the result is -0. |
| 3663 // If input is in [+0, +0.5[, the result is +0. | 3660 // If input is in [+0, +0.5[, the result is +0. |
| 3664 // If the input is +0.5, the result is 1. | 3661 // If the input is +0.5, the result is 1. |
| 3665 __ bgt(&convert); // Out of [-0.5, +0.5]. | 3662 __ bgt(&convert); // Out of [-0.5, +0.5]. |
| 3666 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 3663 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 3667 // [-0.5, -0] (negative) yields minus zero. | 3664 // [-0.5, -0] (negative) yields minus zero. |
| 3668 __ TestDoubleSign(input, scratch1); | 3665 __ TestDoubleSign(input, scratch1); |
| 3669 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero); | 3666 DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero); |
| 3670 } | 3667 } |
| 3671 __ fcmpu(input, dot_five); | 3668 __ fcmpu(input, dot_five); |
| 3672 if (CpuFeatures::IsSupported(ISELECT)) { | 3669 if (CpuFeatures::IsSupported(ISELECT)) { |
| 3673 __ li(result, Operand(1)); | 3670 __ li(result, Operand(1)); |
| 3674 __ isel(lt, result, r0, result); | 3671 __ isel(lt, result, r0, result); |
| 3675 __ b(&done); | 3672 __ b(&done); |
| 3676 } else { | 3673 } else { |
| 3677 Label return_zero; | 3674 Label return_zero; |
| 3678 __ bne(&return_zero); | 3675 __ bne(&return_zero); |
| 3679 __ li(result, Operand(1)); // +0.5. | 3676 __ li(result, Operand(1)); // +0.5. |
| 3680 __ b(&done); | 3677 __ b(&done); |
| 3681 // Remaining cases: [+0, +0.5[ or [-0.5, +0.5[, depending on | 3678 // Remaining cases: [+0, +0.5[ or [-0.5, +0.5[, depending on |
| 3682 // flag kBailoutOnMinusZero. | 3679 // flag kBailoutOnMinusZero. |
| 3683 __ bind(&return_zero); | 3680 __ bind(&return_zero); |
| 3684 __ li(result, Operand::Zero()); | 3681 __ li(result, Operand::Zero()); |
| 3685 __ b(&done); | 3682 __ b(&done); |
| 3686 } | 3683 } |
| 3687 | 3684 |
| 3688 __ bind(&convert); | 3685 __ bind(&convert); |
| 3689 __ fadd(input_plus_dot_five, input, dot_five); | 3686 __ fadd(input_plus_dot_five, input, dot_five); |
| 3690 // Reuse dot_five (double_scratch0) as we no longer need this value. | 3687 // Reuse dot_five (double_scratch0) as we no longer need this value. |
| 3691 __ TryInt32Floor(result, input_plus_dot_five, scratch1, scratch2, | 3688 __ TryInt32Floor(result, input_plus_dot_five, scratch1, scratch2, |
| 3692 double_scratch0(), &done, &done); | 3689 double_scratch0(), &done, &done); |
| 3693 DeoptimizeIf(al, instr, Deoptimizer::kLostPrecisionOrNaN); | 3690 DeoptimizeIf(al, instr, DeoptimizeReason::kLostPrecisionOrNaN); |
| 3694 __ bind(&done); | 3691 __ bind(&done); |
| 3695 } | 3692 } |
| 3696 | 3693 |
| 3697 | 3694 |
| 3698 void LCodeGen::DoMathFround(LMathFround* instr) { | 3695 void LCodeGen::DoMathFround(LMathFround* instr) { |
| 3699 DoubleRegister input_reg = ToDoubleRegister(instr->value()); | 3696 DoubleRegister input_reg = ToDoubleRegister(instr->value()); |
| 3700 DoubleRegister output_reg = ToDoubleRegister(instr->result()); | 3697 DoubleRegister output_reg = ToDoubleRegister(instr->result()); |
| 3701 __ frsp(output_reg, input_reg); | 3698 __ frsp(output_reg, input_reg); |
| 3702 } | 3699 } |
| 3703 | 3700 |
| (...skipping 44 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 3748 if (exponent_type.IsSmi()) { | 3745 if (exponent_type.IsSmi()) { |
| 3749 MathPowStub stub(isolate(), MathPowStub::TAGGED); | 3746 MathPowStub stub(isolate(), MathPowStub::TAGGED); |
| 3750 __ CallStub(&stub); | 3747 __ CallStub(&stub); |
| 3751 } else if (exponent_type.IsTagged()) { | 3748 } else if (exponent_type.IsTagged()) { |
| 3752 Label no_deopt; | 3749 Label no_deopt; |
| 3753 __ JumpIfSmi(tagged_exponent, &no_deopt); | 3750 __ JumpIfSmi(tagged_exponent, &no_deopt); |
| 3754 DCHECK(!r10.is(tagged_exponent)); | 3751 DCHECK(!r10.is(tagged_exponent)); |
| 3755 __ LoadP(r10, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset)); | 3752 __ LoadP(r10, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset)); |
| 3756 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); | 3753 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); |
| 3757 __ cmp(r10, ip); | 3754 __ cmp(r10, ip); |
| 3758 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber); | 3755 DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber); |
| 3759 __ bind(&no_deopt); | 3756 __ bind(&no_deopt); |
| 3760 MathPowStub stub(isolate(), MathPowStub::TAGGED); | 3757 MathPowStub stub(isolate(), MathPowStub::TAGGED); |
| 3761 __ CallStub(&stub); | 3758 __ CallStub(&stub); |
| 3762 } else if (exponent_type.IsInteger32()) { | 3759 } else if (exponent_type.IsInteger32()) { |
| 3763 MathPowStub stub(isolate(), MathPowStub::INTEGER); | 3760 MathPowStub stub(isolate(), MathPowStub::INTEGER); |
| 3764 __ CallStub(&stub); | 3761 __ CallStub(&stub); |
| 3765 } else { | 3762 } else { |
| 3766 DCHECK(exponent_type.IsDouble()); | 3763 DCHECK(exponent_type.IsDouble()); |
| 3767 MathPowStub stub(isolate(), MathPowStub::DOUBLE); | 3764 MathPowStub stub(isolate(), MathPowStub::DOUBLE); |
| 3768 __ CallStub(&stub); | 3765 __ CallStub(&stub); |
| (...skipping 363 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 4132 } else { | 4129 } else { |
| 4133 __ cmplw(length, index); | 4130 __ cmplw(length, index); |
| 4134 } | 4131 } |
| 4135 } | 4132 } |
| 4136 if (FLAG_debug_code && instr->hydrogen()->skip_check()) { | 4133 if (FLAG_debug_code && instr->hydrogen()->skip_check()) { |
| 4137 Label done; | 4134 Label done; |
| 4138 __ b(NegateCondition(cc), &done); | 4135 __ b(NegateCondition(cc), &done); |
| 4139 __ stop("eliminated bounds check failed"); | 4136 __ stop("eliminated bounds check failed"); |
| 4140 __ bind(&done); | 4137 __ bind(&done); |
| 4141 } else { | 4138 } else { |
| 4142 DeoptimizeIf(cc, instr, Deoptimizer::kOutOfBounds); | 4139 DeoptimizeIf(cc, instr, DeoptimizeReason::kOutOfBounds); |
| 4143 } | 4140 } |
| 4144 } | 4141 } |
| 4145 | 4142 |
| 4146 | 4143 |
| 4147 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) { | 4144 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) { |
| 4148 Register external_pointer = ToRegister(instr->elements()); | 4145 Register external_pointer = ToRegister(instr->elements()); |
| 4149 Register key = no_reg; | 4146 Register key = no_reg; |
| 4150 ElementsKind elements_kind = instr->elements_kind(); | 4147 ElementsKind elements_kind = instr->elements_kind(); |
| 4151 bool key_is_constant = instr->key()->IsConstantOperand(); | 4148 bool key_is_constant = instr->key()->IsConstantOperand(); |
| 4152 int constant_key = 0; | 4149 int constant_key = 0; |
| (...skipping 297 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 4450 GrowArrayElementsStub stub(isolate(), instr->hydrogen()->is_js_array(), | 4447 GrowArrayElementsStub stub(isolate(), instr->hydrogen()->is_js_array(), |
| 4451 instr->hydrogen()->kind()); | 4448 instr->hydrogen()->kind()); |
| 4452 __ CallStub(&stub); | 4449 __ CallStub(&stub); |
| 4453 RecordSafepointWithLazyDeopt( | 4450 RecordSafepointWithLazyDeopt( |
| 4454 instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); | 4451 instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); |
| 4455 __ StoreToSafepointRegisterSlot(result, result); | 4452 __ StoreToSafepointRegisterSlot(result, result); |
| 4456 } | 4453 } |
| 4457 | 4454 |
| 4458 // Deopt on smi, which means the elements array changed to dictionary mode. | 4455 // Deopt on smi, which means the elements array changed to dictionary mode. |
| 4459 __ TestIfSmi(result, r0); | 4456 __ TestIfSmi(result, r0); |
| 4460 DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0); | 4457 DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, cr0); |
| 4461 } | 4458 } |
| 4462 | 4459 |
| 4463 | 4460 |
| 4464 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) { | 4461 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) { |
| 4465 Register object_reg = ToRegister(instr->object()); | 4462 Register object_reg = ToRegister(instr->object()); |
| 4466 Register scratch = scratch0(); | 4463 Register scratch = scratch0(); |
| 4467 | 4464 |
| 4468 Handle<Map> from_map = instr->original_map(); | 4465 Handle<Map> from_map = instr->original_map(); |
| 4469 Handle<Map> to_map = instr->transitioned_map(); | 4466 Handle<Map> to_map = instr->transitioned_map(); |
| 4470 ElementsKind from_kind = instr->from_kind(); | 4467 ElementsKind from_kind = instr->from_kind(); |
| (...skipping 25 matching lines...) Expand all Loading... |
| 4496 __ bind(¬_applicable); | 4493 __ bind(¬_applicable); |
| 4497 } | 4494 } |
| 4498 | 4495 |
| 4499 | 4496 |
| 4500 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) { | 4497 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) { |
| 4501 Register object = ToRegister(instr->object()); | 4498 Register object = ToRegister(instr->object()); |
| 4502 Register temp1 = ToRegister(instr->temp1()); | 4499 Register temp1 = ToRegister(instr->temp1()); |
| 4503 Register temp2 = ToRegister(instr->temp2()); | 4500 Register temp2 = ToRegister(instr->temp2()); |
| 4504 Label no_memento_found; | 4501 Label no_memento_found; |
| 4505 __ TestJSArrayForAllocationMemento(object, temp1, temp2, &no_memento_found); | 4502 __ TestJSArrayForAllocationMemento(object, temp1, temp2, &no_memento_found); |
| 4506 DeoptimizeIf(eq, instr, Deoptimizer::kMementoFound); | 4503 DeoptimizeIf(eq, instr, DeoptimizeReason::kMementoFound); |
| 4507 __ bind(&no_memento_found); | 4504 __ bind(&no_memento_found); |
| 4508 } | 4505 } |
| 4509 | 4506 |
| 4510 | 4507 |
| 4511 void LCodeGen::DoStringAdd(LStringAdd* instr) { | 4508 void LCodeGen::DoStringAdd(LStringAdd* instr) { |
| 4512 DCHECK(ToRegister(instr->context()).is(cp)); | 4509 DCHECK(ToRegister(instr->context()).is(cp)); |
| 4513 DCHECK(ToRegister(instr->left()).is(r4)); | 4510 DCHECK(ToRegister(instr->left()).is(r4)); |
| 4514 DCHECK(ToRegister(instr->right()).is(r3)); | 4511 DCHECK(ToRegister(instr->right()).is(r3)); |
| 4515 StringAddStub stub(isolate(), instr->hydrogen()->flags(), | 4512 StringAddStub stub(isolate(), instr->hydrogen()->flags(), |
| 4516 instr->hydrogen()->pretenure_flag()); | 4513 instr->hydrogen()->pretenure_flag()); |
| (...skipping 290 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 4807 } | 4804 } |
| 4808 | 4805 |
| 4809 | 4806 |
| 4810 void LCodeGen::DoSmiTag(LSmiTag* instr) { | 4807 void LCodeGen::DoSmiTag(LSmiTag* instr) { |
| 4811 HChange* hchange = instr->hydrogen(); | 4808 HChange* hchange = instr->hydrogen(); |
| 4812 Register input = ToRegister(instr->value()); | 4809 Register input = ToRegister(instr->value()); |
| 4813 Register output = ToRegister(instr->result()); | 4810 Register output = ToRegister(instr->result()); |
| 4814 if (hchange->CheckFlag(HValue::kCanOverflow) && | 4811 if (hchange->CheckFlag(HValue::kCanOverflow) && |
| 4815 hchange->value()->CheckFlag(HValue::kUint32)) { | 4812 hchange->value()->CheckFlag(HValue::kUint32)) { |
| 4816 __ TestUnsignedSmiCandidate(input, r0); | 4813 __ TestUnsignedSmiCandidate(input, r0); |
| 4817 DeoptimizeIf(ne, instr, Deoptimizer::kOverflow, cr0); | 4814 DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow, cr0); |
| 4818 } | 4815 } |
| 4819 #if !V8_TARGET_ARCH_PPC64 | 4816 #if !V8_TARGET_ARCH_PPC64 |
| 4820 if (hchange->CheckFlag(HValue::kCanOverflow) && | 4817 if (hchange->CheckFlag(HValue::kCanOverflow) && |
| 4821 !hchange->value()->CheckFlag(HValue::kUint32)) { | 4818 !hchange->value()->CheckFlag(HValue::kUint32)) { |
| 4822 __ SmiTagCheckOverflow(output, input, r0); | 4819 __ SmiTagCheckOverflow(output, input, r0); |
| 4823 DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0); | 4820 DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow, cr0); |
| 4824 } else { | 4821 } else { |
| 4825 #endif | 4822 #endif |
| 4826 __ SmiTag(output, input); | 4823 __ SmiTag(output, input); |
| 4827 #if !V8_TARGET_ARCH_PPC64 | 4824 #if !V8_TARGET_ARCH_PPC64 |
| 4828 } | 4825 } |
| 4829 #endif | 4826 #endif |
| 4830 } | 4827 } |
| 4831 | 4828 |
| 4832 | 4829 |
| 4833 void LCodeGen::DoSmiUntag(LSmiUntag* instr) { | 4830 void LCodeGen::DoSmiUntag(LSmiUntag* instr) { |
| 4834 Register scratch = scratch0(); | 4831 Register scratch = scratch0(); |
| 4835 Register input = ToRegister(instr->value()); | 4832 Register input = ToRegister(instr->value()); |
| 4836 Register result = ToRegister(instr->result()); | 4833 Register result = ToRegister(instr->result()); |
| 4837 if (instr->needs_check()) { | 4834 if (instr->needs_check()) { |
| 4838 // If the input is a HeapObject, value of scratch won't be zero. | 4835 // If the input is a HeapObject, value of scratch won't be zero. |
| 4839 __ andi(scratch, input, Operand(kHeapObjectTag)); | 4836 __ andi(scratch, input, Operand(kHeapObjectTag)); |
| 4840 __ SmiUntag(result, input); | 4837 __ SmiUntag(result, input); |
| 4841 DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, cr0); | 4838 DeoptimizeIf(ne, instr, DeoptimizeReason::kNotASmi, cr0); |
| 4842 } else { | 4839 } else { |
| 4843 __ SmiUntag(result, input); | 4840 __ SmiUntag(result, input); |
| 4844 } | 4841 } |
| 4845 } | 4842 } |
| 4846 | 4843 |
| 4847 | 4844 |
| 4848 void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg, | 4845 void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg, |
| 4849 DoubleRegister result_reg, | 4846 DoubleRegister result_reg, |
| 4850 NumberUntagDMode mode) { | 4847 NumberUntagDMode mode) { |
| 4851 bool can_convert_undefined_to_nan = | 4848 bool can_convert_undefined_to_nan = |
| 4852 instr->hydrogen()->can_convert_undefined_to_nan(); | 4849 instr->hydrogen()->can_convert_undefined_to_nan(); |
| 4853 bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero(); | 4850 bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero(); |
| 4854 | 4851 |
| 4855 Register scratch = scratch0(); | 4852 Register scratch = scratch0(); |
| 4856 DCHECK(!result_reg.is(double_scratch0())); | 4853 DCHECK(!result_reg.is(double_scratch0())); |
| 4857 | 4854 |
| 4858 Label convert, load_smi, done; | 4855 Label convert, load_smi, done; |
| 4859 | 4856 |
| 4860 if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) { | 4857 if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) { |
| 4861 // Smi check. | 4858 // Smi check. |
| 4862 __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi); | 4859 __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi); |
| 4863 | 4860 |
| 4864 // Heap number map check. | 4861 // Heap number map check. |
| 4865 __ LoadP(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); | 4862 __ LoadP(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); |
| 4866 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); | 4863 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); |
| 4867 __ cmp(scratch, ip); | 4864 __ cmp(scratch, ip); |
| 4868 if (can_convert_undefined_to_nan) { | 4865 if (can_convert_undefined_to_nan) { |
| 4869 __ bne(&convert); | 4866 __ bne(&convert); |
| 4870 } else { | 4867 } else { |
| 4871 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber); | 4868 DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber); |
| 4872 } | 4869 } |
| 4873 // load heap number | 4870 // load heap number |
| 4874 __ lfd(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset)); | 4871 __ lfd(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset)); |
| 4875 if (deoptimize_on_minus_zero) { | 4872 if (deoptimize_on_minus_zero) { |
| 4876 __ TestDoubleIsMinusZero(result_reg, scratch, ip); | 4873 __ TestDoubleIsMinusZero(result_reg, scratch, ip); |
| 4877 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero); | 4874 DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero); |
| 4878 } | 4875 } |
| 4879 __ b(&done); | 4876 __ b(&done); |
| 4880 if (can_convert_undefined_to_nan) { | 4877 if (can_convert_undefined_to_nan) { |
| 4881 __ bind(&convert); | 4878 __ bind(&convert); |
| 4882 // Convert undefined (and hole) to NaN. | 4879 // Convert undefined (and hole) to NaN. |
| 4883 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); | 4880 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); |
| 4884 __ cmp(input_reg, ip); | 4881 __ cmp(input_reg, ip); |
| 4885 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined); | 4882 DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumberUndefined); |
| 4886 __ LoadRoot(scratch, Heap::kNanValueRootIndex); | 4883 __ LoadRoot(scratch, Heap::kNanValueRootIndex); |
| 4887 __ lfd(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset)); | 4884 __ lfd(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset)); |
| 4888 __ b(&done); | 4885 __ b(&done); |
| 4889 } | 4886 } |
| 4890 } else { | 4887 } else { |
| 4891 __ SmiUntag(scratch, input_reg); | 4888 __ SmiUntag(scratch, input_reg); |
| 4892 DCHECK(mode == NUMBER_CANDIDATE_IS_SMI); | 4889 DCHECK(mode == NUMBER_CANDIDATE_IS_SMI); |
| 4893 } | 4890 } |
| 4894 // Smi to double register conversion | 4891 // Smi to double register conversion |
| 4895 __ bind(&load_smi); | 4892 __ bind(&load_smi); |
| (...skipping 41 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 4937 __ bind(&check_bools); | 4934 __ bind(&check_bools); |
| 4938 __ LoadRoot(ip, Heap::kTrueValueRootIndex); | 4935 __ LoadRoot(ip, Heap::kTrueValueRootIndex); |
| 4939 __ cmp(input_reg, ip); | 4936 __ cmp(input_reg, ip); |
| 4940 __ bne(&check_false); | 4937 __ bne(&check_false); |
| 4941 __ li(input_reg, Operand(1)); | 4938 __ li(input_reg, Operand(1)); |
| 4942 __ b(&done); | 4939 __ b(&done); |
| 4943 | 4940 |
| 4944 __ bind(&check_false); | 4941 __ bind(&check_false); |
| 4945 __ LoadRoot(ip, Heap::kFalseValueRootIndex); | 4942 __ LoadRoot(ip, Heap::kFalseValueRootIndex); |
| 4946 __ cmp(input_reg, ip); | 4943 __ cmp(input_reg, ip); |
| 4947 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefinedBoolean); | 4944 DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumberUndefinedBoolean); |
| 4948 __ li(input_reg, Operand::Zero()); | 4945 __ li(input_reg, Operand::Zero()); |
| 4949 } else { | 4946 } else { |
| 4950 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber); | 4947 DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber); |
| 4951 | 4948 |
| 4952 __ lfd(double_scratch2, | 4949 __ lfd(double_scratch2, |
| 4953 FieldMemOperand(input_reg, HeapNumber::kValueOffset)); | 4950 FieldMemOperand(input_reg, HeapNumber::kValueOffset)); |
| 4954 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 4951 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 4955 // preserve heap number pointer in scratch2 for minus zero check below | 4952 // preserve heap number pointer in scratch2 for minus zero check below |
| 4956 __ mr(scratch2, input_reg); | 4953 __ mr(scratch2, input_reg); |
| 4957 } | 4954 } |
| 4958 __ TryDoubleToInt32Exact(input_reg, double_scratch2, scratch1, | 4955 __ TryDoubleToInt32Exact(input_reg, double_scratch2, scratch1, |
| 4959 double_scratch); | 4956 double_scratch); |
| 4960 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN); | 4957 DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN); |
| 4961 | 4958 |
| 4962 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 4959 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 4963 __ cmpi(input_reg, Operand::Zero()); | 4960 __ cmpi(input_reg, Operand::Zero()); |
| 4964 __ bne(&done); | 4961 __ bne(&done); |
| 4965 __ TestHeapNumberSign(scratch2, scratch1); | 4962 __ TestHeapNumberSign(scratch2, scratch1); |
| 4966 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero); | 4963 DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero); |
| 4967 } | 4964 } |
| 4968 } | 4965 } |
| 4969 __ bind(&done); | 4966 __ bind(&done); |
| 4970 } | 4967 } |
| 4971 | 4968 |
| 4972 | 4969 |
| 4973 void LCodeGen::DoTaggedToI(LTaggedToI* instr) { | 4970 void LCodeGen::DoTaggedToI(LTaggedToI* instr) { |
| 4974 class DeferredTaggedToI final : public LDeferredCode { | 4971 class DeferredTaggedToI final : public LDeferredCode { |
| 4975 public: | 4972 public: |
| 4976 DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr) | 4973 DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr) |
| (...skipping 48 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 5025 Register scratch1 = scratch0(); | 5022 Register scratch1 = scratch0(); |
| 5026 DoubleRegister double_input = ToDoubleRegister(instr->value()); | 5023 DoubleRegister double_input = ToDoubleRegister(instr->value()); |
| 5027 DoubleRegister double_scratch = double_scratch0(); | 5024 DoubleRegister double_scratch = double_scratch0(); |
| 5028 | 5025 |
| 5029 if (instr->truncating()) { | 5026 if (instr->truncating()) { |
| 5030 __ TruncateDoubleToI(result_reg, double_input); | 5027 __ TruncateDoubleToI(result_reg, double_input); |
| 5031 } else { | 5028 } else { |
| 5032 __ TryDoubleToInt32Exact(result_reg, double_input, scratch1, | 5029 __ TryDoubleToInt32Exact(result_reg, double_input, scratch1, |
| 5033 double_scratch); | 5030 double_scratch); |
| 5034 // Deoptimize if the input wasn't a int32 (inside a double). | 5031 // Deoptimize if the input wasn't a int32 (inside a double). |
| 5035 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN); | 5032 DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN); |
| 5036 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 5033 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 5037 Label done; | 5034 Label done; |
| 5038 __ cmpi(result_reg, Operand::Zero()); | 5035 __ cmpi(result_reg, Operand::Zero()); |
| 5039 __ bne(&done); | 5036 __ bne(&done); |
| 5040 __ TestDoubleSign(double_input, scratch1); | 5037 __ TestDoubleSign(double_input, scratch1); |
| 5041 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero); | 5038 DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero); |
| 5042 __ bind(&done); | 5039 __ bind(&done); |
| 5043 } | 5040 } |
| 5044 } | 5041 } |
| 5045 } | 5042 } |
| 5046 | 5043 |
| 5047 | 5044 |
| 5048 void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) { | 5045 void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) { |
| 5049 Register result_reg = ToRegister(instr->result()); | 5046 Register result_reg = ToRegister(instr->result()); |
| 5050 Register scratch1 = scratch0(); | 5047 Register scratch1 = scratch0(); |
| 5051 DoubleRegister double_input = ToDoubleRegister(instr->value()); | 5048 DoubleRegister double_input = ToDoubleRegister(instr->value()); |
| 5052 DoubleRegister double_scratch = double_scratch0(); | 5049 DoubleRegister double_scratch = double_scratch0(); |
| 5053 | 5050 |
| 5054 if (instr->truncating()) { | 5051 if (instr->truncating()) { |
| 5055 __ TruncateDoubleToI(result_reg, double_input); | 5052 __ TruncateDoubleToI(result_reg, double_input); |
| 5056 } else { | 5053 } else { |
| 5057 __ TryDoubleToInt32Exact(result_reg, double_input, scratch1, | 5054 __ TryDoubleToInt32Exact(result_reg, double_input, scratch1, |
| 5058 double_scratch); | 5055 double_scratch); |
| 5059 // Deoptimize if the input wasn't a int32 (inside a double). | 5056 // Deoptimize if the input wasn't a int32 (inside a double). |
| 5060 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN); | 5057 DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN); |
| 5061 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 5058 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 5062 Label done; | 5059 Label done; |
| 5063 __ cmpi(result_reg, Operand::Zero()); | 5060 __ cmpi(result_reg, Operand::Zero()); |
| 5064 __ bne(&done); | 5061 __ bne(&done); |
| 5065 __ TestDoubleSign(double_input, scratch1); | 5062 __ TestDoubleSign(double_input, scratch1); |
| 5066 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero); | 5063 DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero); |
| 5067 __ bind(&done); | 5064 __ bind(&done); |
| 5068 } | 5065 } |
| 5069 } | 5066 } |
| 5070 #if V8_TARGET_ARCH_PPC64 | 5067 #if V8_TARGET_ARCH_PPC64 |
| 5071 __ SmiTag(result_reg); | 5068 __ SmiTag(result_reg); |
| 5072 #else | 5069 #else |
| 5073 __ SmiTagCheckOverflow(result_reg, r0); | 5070 __ SmiTagCheckOverflow(result_reg, r0); |
| 5074 DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0); | 5071 DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow, cr0); |
| 5075 #endif | 5072 #endif |
| 5076 } | 5073 } |
| 5077 | 5074 |
| 5078 | 5075 |
| 5079 void LCodeGen::DoCheckSmi(LCheckSmi* instr) { | 5076 void LCodeGen::DoCheckSmi(LCheckSmi* instr) { |
| 5080 LOperand* input = instr->value(); | 5077 LOperand* input = instr->value(); |
| 5081 __ TestIfSmi(ToRegister(input), r0); | 5078 __ TestIfSmi(ToRegister(input), r0); |
| 5082 DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, cr0); | 5079 DeoptimizeIf(ne, instr, DeoptimizeReason::kNotASmi, cr0); |
| 5083 } | 5080 } |
| 5084 | 5081 |
| 5085 | 5082 |
| 5086 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) { | 5083 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) { |
| 5087 if (!instr->hydrogen()->value()->type().IsHeapObject()) { | 5084 if (!instr->hydrogen()->value()->type().IsHeapObject()) { |
| 5088 LOperand* input = instr->value(); | 5085 LOperand* input = instr->value(); |
| 5089 __ TestIfSmi(ToRegister(input), r0); | 5086 __ TestIfSmi(ToRegister(input), r0); |
| 5090 DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0); | 5087 DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, cr0); |
| 5091 } | 5088 } |
| 5092 } | 5089 } |
| 5093 | 5090 |
| 5094 | 5091 |
| 5095 void LCodeGen::DoCheckArrayBufferNotNeutered( | 5092 void LCodeGen::DoCheckArrayBufferNotNeutered( |
| 5096 LCheckArrayBufferNotNeutered* instr) { | 5093 LCheckArrayBufferNotNeutered* instr) { |
| 5097 Register view = ToRegister(instr->view()); | 5094 Register view = ToRegister(instr->view()); |
| 5098 Register scratch = scratch0(); | 5095 Register scratch = scratch0(); |
| 5099 | 5096 |
| 5100 __ LoadP(scratch, FieldMemOperand(view, JSArrayBufferView::kBufferOffset)); | 5097 __ LoadP(scratch, FieldMemOperand(view, JSArrayBufferView::kBufferOffset)); |
| 5101 __ lwz(scratch, FieldMemOperand(scratch, JSArrayBuffer::kBitFieldOffset)); | 5098 __ lwz(scratch, FieldMemOperand(scratch, JSArrayBuffer::kBitFieldOffset)); |
| 5102 __ andi(r0, scratch, Operand(1 << JSArrayBuffer::WasNeutered::kShift)); | 5099 __ andi(r0, scratch, Operand(1 << JSArrayBuffer::WasNeutered::kShift)); |
| 5103 DeoptimizeIf(ne, instr, Deoptimizer::kOutOfBounds, cr0); | 5100 DeoptimizeIf(ne, instr, DeoptimizeReason::kOutOfBounds, cr0); |
| 5104 } | 5101 } |
| 5105 | 5102 |
| 5106 | 5103 |
| 5107 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) { | 5104 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) { |
| 5108 Register input = ToRegister(instr->value()); | 5105 Register input = ToRegister(instr->value()); |
| 5109 Register scratch = scratch0(); | 5106 Register scratch = scratch0(); |
| 5110 | 5107 |
| 5111 __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); | 5108 __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); |
| 5112 __ lbz(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); | 5109 __ lbz(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); |
| 5113 | 5110 |
| 5114 if (instr->hydrogen()->is_interval_check()) { | 5111 if (instr->hydrogen()->is_interval_check()) { |
| 5115 InstanceType first; | 5112 InstanceType first; |
| 5116 InstanceType last; | 5113 InstanceType last; |
| 5117 instr->hydrogen()->GetCheckInterval(&first, &last); | 5114 instr->hydrogen()->GetCheckInterval(&first, &last); |
| 5118 | 5115 |
| 5119 __ cmpli(scratch, Operand(first)); | 5116 __ cmpli(scratch, Operand(first)); |
| 5120 | 5117 |
| 5121 // If there is only one type in the interval check for equality. | 5118 // If there is only one type in the interval check for equality. |
| 5122 if (first == last) { | 5119 if (first == last) { |
| 5123 DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType); | 5120 DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongInstanceType); |
| 5124 } else { | 5121 } else { |
| 5125 DeoptimizeIf(lt, instr, Deoptimizer::kWrongInstanceType); | 5122 DeoptimizeIf(lt, instr, DeoptimizeReason::kWrongInstanceType); |
| 5126 // Omit check for the last type. | 5123 // Omit check for the last type. |
| 5127 if (last != LAST_TYPE) { | 5124 if (last != LAST_TYPE) { |
| 5128 __ cmpli(scratch, Operand(last)); | 5125 __ cmpli(scratch, Operand(last)); |
| 5129 DeoptimizeIf(gt, instr, Deoptimizer::kWrongInstanceType); | 5126 DeoptimizeIf(gt, instr, DeoptimizeReason::kWrongInstanceType); |
| 5130 } | 5127 } |
| 5131 } | 5128 } |
| 5132 } else { | 5129 } else { |
| 5133 uint8_t mask; | 5130 uint8_t mask; |
| 5134 uint8_t tag; | 5131 uint8_t tag; |
| 5135 instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag); | 5132 instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag); |
| 5136 | 5133 |
| 5137 if (base::bits::IsPowerOfTwo32(mask)) { | 5134 if (base::bits::IsPowerOfTwo32(mask)) { |
| 5138 DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag)); | 5135 DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag)); |
| 5139 __ andi(r0, scratch, Operand(mask)); | 5136 __ andi(r0, scratch, Operand(mask)); |
| 5140 DeoptimizeIf(tag == 0 ? ne : eq, instr, Deoptimizer::kWrongInstanceType, | 5137 DeoptimizeIf(tag == 0 ? ne : eq, instr, |
| 5141 cr0); | 5138 DeoptimizeReason::kWrongInstanceType, cr0); |
| 5142 } else { | 5139 } else { |
| 5143 __ andi(scratch, scratch, Operand(mask)); | 5140 __ andi(scratch, scratch, Operand(mask)); |
| 5144 __ cmpi(scratch, Operand(tag)); | 5141 __ cmpi(scratch, Operand(tag)); |
| 5145 DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType); | 5142 DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongInstanceType); |
| 5146 } | 5143 } |
| 5147 } | 5144 } |
| 5148 } | 5145 } |
| 5149 | 5146 |
| 5150 | 5147 |
| 5151 void LCodeGen::DoCheckValue(LCheckValue* instr) { | 5148 void LCodeGen::DoCheckValue(LCheckValue* instr) { |
| 5152 Register reg = ToRegister(instr->value()); | 5149 Register reg = ToRegister(instr->value()); |
| 5153 Handle<HeapObject> object = instr->hydrogen()->object().handle(); | 5150 Handle<HeapObject> object = instr->hydrogen()->object().handle(); |
| 5154 AllowDeferredHandleDereference smi_check; | 5151 AllowDeferredHandleDereference smi_check; |
| 5155 if (isolate()->heap()->InNewSpace(*object)) { | 5152 if (isolate()->heap()->InNewSpace(*object)) { |
| 5156 Register reg = ToRegister(instr->value()); | 5153 Register reg = ToRegister(instr->value()); |
| 5157 Handle<Cell> cell = isolate()->factory()->NewCell(object); | 5154 Handle<Cell> cell = isolate()->factory()->NewCell(object); |
| 5158 __ mov(ip, Operand(cell)); | 5155 __ mov(ip, Operand(cell)); |
| 5159 __ LoadP(ip, FieldMemOperand(ip, Cell::kValueOffset)); | 5156 __ LoadP(ip, FieldMemOperand(ip, Cell::kValueOffset)); |
| 5160 __ cmp(reg, ip); | 5157 __ cmp(reg, ip); |
| 5161 } else { | 5158 } else { |
| 5162 __ Cmpi(reg, Operand(object), r0); | 5159 __ Cmpi(reg, Operand(object), r0); |
| 5163 } | 5160 } |
| 5164 DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch); | 5161 DeoptimizeIf(ne, instr, DeoptimizeReason::kValueMismatch); |
| 5165 } | 5162 } |
| 5166 | 5163 |
| 5167 | 5164 |
| 5168 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) { | 5165 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) { |
| 5169 Register temp = ToRegister(instr->temp()); | 5166 Register temp = ToRegister(instr->temp()); |
| 5170 { | 5167 { |
| 5171 PushSafepointRegistersScope scope(this); | 5168 PushSafepointRegistersScope scope(this); |
| 5172 __ push(object); | 5169 __ push(object); |
| 5173 __ li(cp, Operand::Zero()); | 5170 __ li(cp, Operand::Zero()); |
| 5174 __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance); | 5171 __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance); |
| 5175 RecordSafepointWithRegisters(instr->pointer_map(), 1, | 5172 RecordSafepointWithRegisters(instr->pointer_map(), 1, |
| 5176 Safepoint::kNoLazyDeopt); | 5173 Safepoint::kNoLazyDeopt); |
| 5177 __ StoreToSafepointRegisterSlot(r3, temp); | 5174 __ StoreToSafepointRegisterSlot(r3, temp); |
| 5178 } | 5175 } |
| 5179 __ TestIfSmi(temp, r0); | 5176 __ TestIfSmi(temp, r0); |
| 5180 DeoptimizeIf(eq, instr, Deoptimizer::kInstanceMigrationFailed, cr0); | 5177 DeoptimizeIf(eq, instr, DeoptimizeReason::kInstanceMigrationFailed, cr0); |
| 5181 } | 5178 } |
| 5182 | 5179 |
| 5183 | 5180 |
| 5184 void LCodeGen::DoCheckMaps(LCheckMaps* instr) { | 5181 void LCodeGen::DoCheckMaps(LCheckMaps* instr) { |
| 5185 class DeferredCheckMaps final : public LDeferredCode { | 5182 class DeferredCheckMaps final : public LDeferredCode { |
| 5186 public: | 5183 public: |
| 5187 DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object) | 5184 DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object) |
| 5188 : LDeferredCode(codegen), instr_(instr), object_(object) { | 5185 : LDeferredCode(codegen), instr_(instr), object_(object) { |
| 5189 SetExit(check_maps()); | 5186 SetExit(check_maps()); |
| 5190 } | 5187 } |
| (...skipping 34 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 5225 Handle<Map> map = maps->at(i).handle(); | 5222 Handle<Map> map = maps->at(i).handle(); |
| 5226 __ CompareMap(map_reg, map, &success); | 5223 __ CompareMap(map_reg, map, &success); |
| 5227 __ beq(&success); | 5224 __ beq(&success); |
| 5228 } | 5225 } |
| 5229 | 5226 |
| 5230 Handle<Map> map = maps->at(maps->size() - 1).handle(); | 5227 Handle<Map> map = maps->at(maps->size() - 1).handle(); |
| 5231 __ CompareMap(map_reg, map, &success); | 5228 __ CompareMap(map_reg, map, &success); |
| 5232 if (instr->hydrogen()->HasMigrationTarget()) { | 5229 if (instr->hydrogen()->HasMigrationTarget()) { |
| 5233 __ bne(deferred->entry()); | 5230 __ bne(deferred->entry()); |
| 5234 } else { | 5231 } else { |
| 5235 DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap); | 5232 DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongMap); |
| 5236 } | 5233 } |
| 5237 | 5234 |
| 5238 __ bind(&success); | 5235 __ bind(&success); |
| 5239 } | 5236 } |
| 5240 | 5237 |
| 5241 | 5238 |
| 5242 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { | 5239 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { |
| 5243 DoubleRegister value_reg = ToDoubleRegister(instr->unclamped()); | 5240 DoubleRegister value_reg = ToDoubleRegister(instr->unclamped()); |
| 5244 Register result_reg = ToRegister(instr->result()); | 5241 Register result_reg = ToRegister(instr->result()); |
| 5245 __ ClampDoubleToUint8(result_reg, value_reg, double_scratch0()); | 5242 __ ClampDoubleToUint8(result_reg, value_reg, double_scratch0()); |
| (...skipping 18 matching lines...) Expand all Loading... |
| 5264 __ UntagAndJumpIfSmi(result_reg, input_reg, &is_smi); | 5261 __ UntagAndJumpIfSmi(result_reg, input_reg, &is_smi); |
| 5265 | 5262 |
| 5266 // Check for heap number | 5263 // Check for heap number |
| 5267 __ LoadP(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); | 5264 __ LoadP(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); |
| 5268 __ Cmpi(scratch, Operand(factory()->heap_number_map()), r0); | 5265 __ Cmpi(scratch, Operand(factory()->heap_number_map()), r0); |
| 5269 __ beq(&heap_number); | 5266 __ beq(&heap_number); |
| 5270 | 5267 |
| 5271 // Check for undefined. Undefined is converted to zero for clamping | 5268 // Check for undefined. Undefined is converted to zero for clamping |
| 5272 // conversions. | 5269 // conversions. |
| 5273 __ Cmpi(input_reg, Operand(factory()->undefined_value()), r0); | 5270 __ Cmpi(input_reg, Operand(factory()->undefined_value()), r0); |
| 5274 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined); | 5271 DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumberUndefined); |
| 5275 __ li(result_reg, Operand::Zero()); | 5272 __ li(result_reg, Operand::Zero()); |
| 5276 __ b(&done); | 5273 __ b(&done); |
| 5277 | 5274 |
| 5278 // Heap number | 5275 // Heap number |
| 5279 __ bind(&heap_number); | 5276 __ bind(&heap_number); |
| 5280 __ lfd(temp_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset)); | 5277 __ lfd(temp_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset)); |
| 5281 __ ClampDoubleToUint8(result_reg, temp_reg, double_scratch0()); | 5278 __ ClampDoubleToUint8(result_reg, temp_reg, double_scratch0()); |
| 5282 __ b(&done); | 5279 __ b(&done); |
| 5283 | 5280 |
| 5284 // smi | 5281 // smi |
| (...skipping 410 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 5695 __ CmpSmiLiteral(result, Smi::FromInt(0), r0); | 5692 __ CmpSmiLiteral(result, Smi::FromInt(0), r0); |
| 5696 __ bne(&load_cache); | 5693 __ bne(&load_cache); |
| 5697 __ mov(result, Operand(isolate()->factory()->empty_fixed_array())); | 5694 __ mov(result, Operand(isolate()->factory()->empty_fixed_array())); |
| 5698 __ b(&done); | 5695 __ b(&done); |
| 5699 | 5696 |
| 5700 __ bind(&load_cache); | 5697 __ bind(&load_cache); |
| 5701 __ LoadInstanceDescriptors(map, result); | 5698 __ LoadInstanceDescriptors(map, result); |
| 5702 __ LoadP(result, FieldMemOperand(result, DescriptorArray::kEnumCacheOffset)); | 5699 __ LoadP(result, FieldMemOperand(result, DescriptorArray::kEnumCacheOffset)); |
| 5703 __ LoadP(result, FieldMemOperand(result, FixedArray::SizeFor(instr->idx()))); | 5700 __ LoadP(result, FieldMemOperand(result, FixedArray::SizeFor(instr->idx()))); |
| 5704 __ cmpi(result, Operand::Zero()); | 5701 __ cmpi(result, Operand::Zero()); |
| 5705 DeoptimizeIf(eq, instr, Deoptimizer::kNoCache); | 5702 DeoptimizeIf(eq, instr, DeoptimizeReason::kNoCache); |
| 5706 | 5703 |
| 5707 __ bind(&done); | 5704 __ bind(&done); |
| 5708 } | 5705 } |
| 5709 | 5706 |
| 5710 | 5707 |
| 5711 void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) { | 5708 void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) { |
| 5712 Register object = ToRegister(instr->value()); | 5709 Register object = ToRegister(instr->value()); |
| 5713 Register map = ToRegister(instr->map()); | 5710 Register map = ToRegister(instr->map()); |
| 5714 __ LoadP(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset)); | 5711 __ LoadP(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset)); |
| 5715 __ cmp(map, scratch0()); | 5712 __ cmp(map, scratch0()); |
| 5716 DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap); | 5713 DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongMap); |
| 5717 } | 5714 } |
| 5718 | 5715 |
| 5719 | 5716 |
| 5720 void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr, | 5717 void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr, |
| 5721 Register result, Register object, | 5718 Register result, Register object, |
| 5722 Register index) { | 5719 Register index) { |
| 5723 PushSafepointRegistersScope scope(this); | 5720 PushSafepointRegistersScope scope(this); |
| 5724 __ Push(object, index); | 5721 __ Push(object, index); |
| 5725 __ li(cp, Operand::Zero()); | 5722 __ li(cp, Operand::Zero()); |
| 5726 __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble); | 5723 __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble); |
| (...skipping 57 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 5784 __ LoadP(result, | 5781 __ LoadP(result, |
| 5785 FieldMemOperand(scratch, FixedArray::kHeaderSize - kPointerSize)); | 5782 FieldMemOperand(scratch, FixedArray::kHeaderSize - kPointerSize)); |
| 5786 __ bind(deferred->exit()); | 5783 __ bind(deferred->exit()); |
| 5787 __ bind(&done); | 5784 __ bind(&done); |
| 5788 } | 5785 } |
| 5789 | 5786 |
| 5790 #undef __ | 5787 #undef __ |
| 5791 | 5788 |
| 5792 } // namespace internal | 5789 } // namespace internal |
| 5793 } // namespace v8 | 5790 } // namespace v8 |
| OLD | NEW |