| OLD | NEW |
| 1 // Copyright 2013 the V8 project authors. All rights reserved. | 1 // Copyright 2013 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/v8.h" | 5 #include "src/v8.h" |
| 6 | 6 |
| 7 #include "src/arm64/lithium-codegen-arm64.h" | 7 #include "src/arm64/lithium-codegen-arm64.h" |
| 8 #include "src/arm64/lithium-gap-resolver-arm64.h" | 8 #include "src/arm64/lithium-gap-resolver-arm64.h" |
| 9 #include "src/base/bits.h" | 9 #include "src/base/bits.h" |
| 10 #include "src/code-factory.h" | 10 #include "src/code-factory.h" |
| (...skipping 979 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 990 | 990 |
| 991 for (int i = 0, length = inlined_closures->length(); i < length; i++) { | 991 for (int i = 0, length = inlined_closures->length(); i < length; i++) { |
| 992 DefineDeoptimizationLiteral(inlined_closures->at(i)); | 992 DefineDeoptimizationLiteral(inlined_closures->at(i)); |
| 993 } | 993 } |
| 994 | 994 |
| 995 inlined_function_count_ = deoptimization_literals_.length(); | 995 inlined_function_count_ = deoptimization_literals_.length(); |
| 996 } | 996 } |
| 997 | 997 |
| 998 | 998 |
| 999 void LCodeGen::DeoptimizeBranch( | 999 void LCodeGen::DeoptimizeBranch( |
| 1000 LEnvironment* environment, | 1000 LInstruction* instr, BranchType branch_type, Register reg, int bit, |
| 1001 BranchType branch_type, Register reg, int bit, | |
| 1002 Deoptimizer::BailoutType* override_bailout_type) { | 1001 Deoptimizer::BailoutType* override_bailout_type) { |
| 1002 LEnvironment* environment = instr->environment(); |
| 1003 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); | 1003 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); |
| 1004 Deoptimizer::BailoutType bailout_type = | 1004 Deoptimizer::BailoutType bailout_type = |
| 1005 info()->IsStub() ? Deoptimizer::LAZY : Deoptimizer::EAGER; | 1005 info()->IsStub() ? Deoptimizer::LAZY : Deoptimizer::EAGER; |
| 1006 | 1006 |
| 1007 if (override_bailout_type != NULL) { | 1007 if (override_bailout_type != NULL) { |
| 1008 bailout_type = *override_bailout_type; | 1008 bailout_type = *override_bailout_type; |
| 1009 } | 1009 } |
| 1010 | 1010 |
| 1011 DCHECK(environment->HasBeenRegistered()); | 1011 DCHECK(environment->HasBeenRegistered()); |
| 1012 DCHECK(info()->IsOptimizing() || info()->IsStub()); | 1012 DCHECK(info()->IsOptimizing() || info()->IsStub()); |
| (...skipping 52 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1065 bailout_type, | 1065 bailout_type, |
| 1066 !frame_is_built_); | 1066 !frame_is_built_); |
| 1067 deopt_jump_table_.Add(table_entry, zone()); | 1067 deopt_jump_table_.Add(table_entry, zone()); |
| 1068 } | 1068 } |
| 1069 __ B(&deopt_jump_table_.last()->label, | 1069 __ B(&deopt_jump_table_.last()->label, |
| 1070 branch_type, reg, bit); | 1070 branch_type, reg, bit); |
| 1071 } | 1071 } |
| 1072 } | 1072 } |
| 1073 | 1073 |
| 1074 | 1074 |
| 1075 void LCodeGen::Deoptimize(LEnvironment* environment, | 1075 void LCodeGen::Deoptimize(LInstruction* instr, |
| 1076 Deoptimizer::BailoutType* override_bailout_type) { | 1076 Deoptimizer::BailoutType* override_bailout_type) { |
| 1077 DeoptimizeBranch(environment, always, NoReg, -1, override_bailout_type); | 1077 DeoptimizeBranch(instr, always, NoReg, -1, override_bailout_type); |
| 1078 } | 1078 } |
| 1079 | 1079 |
| 1080 | 1080 |
| 1081 void LCodeGen::DeoptimizeIf(Condition cond, LEnvironment* environment) { | 1081 void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr) { |
| 1082 DeoptimizeBranch(environment, static_cast<BranchType>(cond)); | 1082 DeoptimizeBranch(instr, static_cast<BranchType>(cond)); |
| 1083 } | 1083 } |
| 1084 | 1084 |
| 1085 | 1085 |
| 1086 void LCodeGen::DeoptimizeIfZero(Register rt, LEnvironment* environment) { | 1086 void LCodeGen::DeoptimizeIfZero(Register rt, LInstruction* instr) { |
| 1087 DeoptimizeBranch(environment, reg_zero, rt); | 1087 DeoptimizeBranch(instr, reg_zero, rt); |
| 1088 } | 1088 } |
| 1089 | 1089 |
| 1090 | 1090 |
| 1091 void LCodeGen::DeoptimizeIfNotZero(Register rt, LEnvironment* environment) { | 1091 void LCodeGen::DeoptimizeIfNotZero(Register rt, LInstruction* instr) { |
| 1092 DeoptimizeBranch(environment, reg_not_zero, rt); | 1092 DeoptimizeBranch(instr, reg_not_zero, rt); |
| 1093 } | 1093 } |
| 1094 | 1094 |
| 1095 | 1095 |
| 1096 void LCodeGen::DeoptimizeIfNegative(Register rt, LEnvironment* environment) { | 1096 void LCodeGen::DeoptimizeIfNegative(Register rt, LInstruction* instr) { |
| 1097 int sign_bit = rt.Is64Bits() ? kXSignBit : kWSignBit; | 1097 int sign_bit = rt.Is64Bits() ? kXSignBit : kWSignBit; |
| 1098 DeoptimizeIfBitSet(rt, sign_bit, environment); | 1098 DeoptimizeIfBitSet(rt, sign_bit, instr); |
| 1099 } | 1099 } |
| 1100 | 1100 |
| 1101 | 1101 |
| 1102 void LCodeGen::DeoptimizeIfSmi(Register rt, | 1102 void LCodeGen::DeoptimizeIfSmi(Register rt, LInstruction* instr) { |
| 1103 LEnvironment* environment) { | 1103 DeoptimizeIfBitClear(rt, MaskToBit(kSmiTagMask), instr); |
| 1104 DeoptimizeIfBitClear(rt, MaskToBit(kSmiTagMask), environment); | |
| 1105 } | 1104 } |
| 1106 | 1105 |
| 1107 | 1106 |
| 1108 void LCodeGen::DeoptimizeIfNotSmi(Register rt, LEnvironment* environment) { | 1107 void LCodeGen::DeoptimizeIfNotSmi(Register rt, LInstruction* instr) { |
| 1109 DeoptimizeIfBitSet(rt, MaskToBit(kSmiTagMask), environment); | 1108 DeoptimizeIfBitSet(rt, MaskToBit(kSmiTagMask), instr); |
| 1110 } | 1109 } |
| 1111 | 1110 |
| 1112 | 1111 |
| 1113 void LCodeGen::DeoptimizeIfRoot(Register rt, | 1112 void LCodeGen::DeoptimizeIfRoot(Register rt, Heap::RootListIndex index, |
| 1114 Heap::RootListIndex index, | 1113 LInstruction* instr) { |
| 1115 LEnvironment* environment) { | |
| 1116 __ CompareRoot(rt, index); | 1114 __ CompareRoot(rt, index); |
| 1117 DeoptimizeIf(eq, environment); | 1115 DeoptimizeIf(eq, instr); |
| 1118 } | 1116 } |
| 1119 | 1117 |
| 1120 | 1118 |
| 1121 void LCodeGen::DeoptimizeIfNotRoot(Register rt, | 1119 void LCodeGen::DeoptimizeIfNotRoot(Register rt, Heap::RootListIndex index, |
| 1122 Heap::RootListIndex index, | 1120 LInstruction* instr) { |
| 1123 LEnvironment* environment) { | |
| 1124 __ CompareRoot(rt, index); | 1121 __ CompareRoot(rt, index); |
| 1125 DeoptimizeIf(ne, environment); | 1122 DeoptimizeIf(ne, instr); |
| 1126 } | 1123 } |
| 1127 | 1124 |
| 1128 | 1125 |
| 1129 void LCodeGen::DeoptimizeIfMinusZero(DoubleRegister input, | 1126 void LCodeGen::DeoptimizeIfMinusZero(DoubleRegister input, |
| 1130 LEnvironment* environment) { | 1127 LInstruction* instr) { |
| 1131 __ TestForMinusZero(input); | 1128 __ TestForMinusZero(input); |
| 1132 DeoptimizeIf(vs, environment); | 1129 DeoptimizeIf(vs, instr); |
| 1133 } | 1130 } |
| 1134 | 1131 |
| 1135 | 1132 |
| 1136 void LCodeGen::DeoptimizeIfBitSet(Register rt, | 1133 void LCodeGen::DeoptimizeIfBitSet(Register rt, int bit, LInstruction* instr) { |
| 1137 int bit, | 1134 DeoptimizeBranch(instr, reg_bit_set, rt, bit); |
| 1138 LEnvironment* environment) { | |
| 1139 DeoptimizeBranch(environment, reg_bit_set, rt, bit); | |
| 1140 } | 1135 } |
| 1141 | 1136 |
| 1142 | 1137 |
| 1143 void LCodeGen::DeoptimizeIfBitClear(Register rt, | 1138 void LCodeGen::DeoptimizeIfBitClear(Register rt, int bit, LInstruction* instr) { |
| 1144 int bit, | 1139 DeoptimizeBranch(instr, reg_bit_clear, rt, bit); |
| 1145 LEnvironment* environment) { | |
| 1146 DeoptimizeBranch(environment, reg_bit_clear, rt, bit); | |
| 1147 } | 1140 } |
| 1148 | 1141 |
| 1149 | 1142 |
| 1150 void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) { | 1143 void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) { |
| 1151 if (!info()->IsStub()) { | 1144 if (!info()->IsStub()) { |
| 1152 // Ensure that we have enough space after the previous lazy-bailout | 1145 // Ensure that we have enough space after the previous lazy-bailout |
| 1153 // instruction for patching the code here. | 1146 // instruction for patching the code here. |
| 1154 intptr_t current_pc = masm()->pc_offset(); | 1147 intptr_t current_pc = masm()->pc_offset(); |
| 1155 | 1148 |
| 1156 if (current_pc < (last_lazy_deopt_pc_ + space_needed)) { | 1149 if (current_pc < (last_lazy_deopt_pc_ + space_needed)) { |
| (...skipping 353 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1510 | 1503 |
| 1511 | 1504 |
| 1512 void LCodeGen::DoAddI(LAddI* instr) { | 1505 void LCodeGen::DoAddI(LAddI* instr) { |
| 1513 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); | 1506 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); |
| 1514 Register result = ToRegister32(instr->result()); | 1507 Register result = ToRegister32(instr->result()); |
| 1515 Register left = ToRegister32(instr->left()); | 1508 Register left = ToRegister32(instr->left()); |
| 1516 Operand right = ToShiftedRightOperand32(instr->right(), instr); | 1509 Operand right = ToShiftedRightOperand32(instr->right(), instr); |
| 1517 | 1510 |
| 1518 if (can_overflow) { | 1511 if (can_overflow) { |
| 1519 __ Adds(result, left, right); | 1512 __ Adds(result, left, right); |
| 1520 DeoptimizeIf(vs, instr->environment()); | 1513 DeoptimizeIf(vs, instr); |
| 1521 } else { | 1514 } else { |
| 1522 __ Add(result, left, right); | 1515 __ Add(result, left, right); |
| 1523 } | 1516 } |
| 1524 } | 1517 } |
| 1525 | 1518 |
| 1526 | 1519 |
| 1527 void LCodeGen::DoAddS(LAddS* instr) { | 1520 void LCodeGen::DoAddS(LAddS* instr) { |
| 1528 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); | 1521 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); |
| 1529 Register result = ToRegister(instr->result()); | 1522 Register result = ToRegister(instr->result()); |
| 1530 Register left = ToRegister(instr->left()); | 1523 Register left = ToRegister(instr->left()); |
| 1531 Operand right = ToOperand(instr->right()); | 1524 Operand right = ToOperand(instr->right()); |
| 1532 if (can_overflow) { | 1525 if (can_overflow) { |
| 1533 __ Adds(result, left, right); | 1526 __ Adds(result, left, right); |
| 1534 DeoptimizeIf(vs, instr->environment()); | 1527 DeoptimizeIf(vs, instr); |
| 1535 } else { | 1528 } else { |
| 1536 __ Add(result, left, right); | 1529 __ Add(result, left, right); |
| 1537 } | 1530 } |
| 1538 } | 1531 } |
| 1539 | 1532 |
| 1540 | 1533 |
| 1541 void LCodeGen::DoAllocate(LAllocate* instr) { | 1534 void LCodeGen::DoAllocate(LAllocate* instr) { |
| 1542 class DeferredAllocate: public LDeferredCode { | 1535 class DeferredAllocate: public LDeferredCode { |
| 1543 public: | 1536 public: |
| 1544 DeferredAllocate(LCodeGen* codegen, LAllocate* instr) | 1537 DeferredAllocate(LCodeGen* codegen, LAllocate* instr) |
| (...skipping 105 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1650 Register scratch = x5; | 1643 Register scratch = x5; |
| 1651 DCHECK(receiver.Is(x0)); // Used for parameter count. | 1644 DCHECK(receiver.Is(x0)); // Used for parameter count. |
| 1652 DCHECK(function.Is(x1)); // Required by InvokeFunction. | 1645 DCHECK(function.Is(x1)); // Required by InvokeFunction. |
| 1653 DCHECK(ToRegister(instr->result()).Is(x0)); | 1646 DCHECK(ToRegister(instr->result()).Is(x0)); |
| 1654 DCHECK(instr->IsMarkedAsCall()); | 1647 DCHECK(instr->IsMarkedAsCall()); |
| 1655 | 1648 |
| 1656 // Copy the arguments to this function possibly from the | 1649 // Copy the arguments to this function possibly from the |
| 1657 // adaptor frame below it. | 1650 // adaptor frame below it. |
| 1658 const uint32_t kArgumentsLimit = 1 * KB; | 1651 const uint32_t kArgumentsLimit = 1 * KB; |
| 1659 __ Cmp(length, kArgumentsLimit); | 1652 __ Cmp(length, kArgumentsLimit); |
| 1660 DeoptimizeIf(hi, instr->environment()); | 1653 DeoptimizeIf(hi, instr); |
| 1661 | 1654 |
| 1662 // Push the receiver and use the register to keep the original | 1655 // Push the receiver and use the register to keep the original |
| 1663 // number of arguments. | 1656 // number of arguments. |
| 1664 __ Push(receiver); | 1657 __ Push(receiver); |
| 1665 Register argc = receiver; | 1658 Register argc = receiver; |
| 1666 receiver = NoReg; | 1659 receiver = NoReg; |
| 1667 __ Sxtw(argc, length); | 1660 __ Sxtw(argc, length); |
| 1668 // The arguments are at a one pointer size offset from elements. | 1661 // The arguments are at a one pointer size offset from elements. |
| 1669 __ Add(elements, elements, 1 * kPointerSize); | 1662 __ Add(elements, elements, 1 * kPointerSize); |
| 1670 | 1663 |
| (...skipping 161 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1832 __ Cmp(length, index); | 1825 __ Cmp(length, index); |
| 1833 cond = CommuteCondition(cond); | 1826 cond = CommuteCondition(cond); |
| 1834 } else { | 1827 } else { |
| 1835 Register index = ToRegister32(instr->index()); | 1828 Register index = ToRegister32(instr->index()); |
| 1836 Operand length = ToOperand32(instr->length()); | 1829 Operand length = ToOperand32(instr->length()); |
| 1837 __ Cmp(index, length); | 1830 __ Cmp(index, length); |
| 1838 } | 1831 } |
| 1839 if (FLAG_debug_code && instr->hydrogen()->skip_check()) { | 1832 if (FLAG_debug_code && instr->hydrogen()->skip_check()) { |
| 1840 __ Assert(NegateCondition(cond), kEliminatedBoundsCheckFailed); | 1833 __ Assert(NegateCondition(cond), kEliminatedBoundsCheckFailed); |
| 1841 } else { | 1834 } else { |
| 1842 DeoptimizeIf(cond, instr->environment()); | 1835 DeoptimizeIf(cond, instr); |
| 1843 } | 1836 } |
| 1844 } | 1837 } |
| 1845 | 1838 |
| 1846 | 1839 |
| 1847 void LCodeGen::DoBranch(LBranch* instr) { | 1840 void LCodeGen::DoBranch(LBranch* instr) { |
| 1848 Representation r = instr->hydrogen()->value()->representation(); | 1841 Representation r = instr->hydrogen()->value()->representation(); |
| 1849 Label* true_label = instr->TrueLabel(chunk_); | 1842 Label* true_label = instr->TrueLabel(chunk_); |
| 1850 Label* false_label = instr->FalseLabel(chunk_); | 1843 Label* false_label = instr->FalseLabel(chunk_); |
| 1851 | 1844 |
| 1852 if (r.IsInteger32()) { | 1845 if (r.IsInteger32()) { |
| (...skipping 58 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1911 value, Heap::kNullValueRootIndex, false_label); | 1904 value, Heap::kNullValueRootIndex, false_label); |
| 1912 } | 1905 } |
| 1913 | 1906 |
| 1914 if (expected.Contains(ToBooleanStub::SMI)) { | 1907 if (expected.Contains(ToBooleanStub::SMI)) { |
| 1915 // Smis: 0 -> false, all other -> true. | 1908 // Smis: 0 -> false, all other -> true. |
| 1916 DCHECK(Smi::FromInt(0) == 0); | 1909 DCHECK(Smi::FromInt(0) == 0); |
| 1917 __ Cbz(value, false_label); | 1910 __ Cbz(value, false_label); |
| 1918 __ JumpIfSmi(value, true_label); | 1911 __ JumpIfSmi(value, true_label); |
| 1919 } else if (expected.NeedsMap()) { | 1912 } else if (expected.NeedsMap()) { |
| 1920 // If we need a map later and have a smi, deopt. | 1913 // If we need a map later and have a smi, deopt. |
| 1921 DeoptimizeIfSmi(value, instr->environment()); | 1914 DeoptimizeIfSmi(value, instr); |
| 1922 } | 1915 } |
| 1923 | 1916 |
| 1924 Register map = NoReg; | 1917 Register map = NoReg; |
| 1925 Register scratch = NoReg; | 1918 Register scratch = NoReg; |
| 1926 | 1919 |
| 1927 if (expected.NeedsMap()) { | 1920 if (expected.NeedsMap()) { |
| 1928 DCHECK((instr->temp1() != NULL) && (instr->temp2() != NULL)); | 1921 DCHECK((instr->temp1() != NULL) && (instr->temp2() != NULL)); |
| 1929 map = ToRegister(instr->temp1()); | 1922 map = ToRegister(instr->temp1()); |
| 1930 scratch = ToRegister(instr->temp2()); | 1923 scratch = ToRegister(instr->temp2()); |
| 1931 | 1924 |
| (...skipping 40 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1972 // If we got a NaN (overflow bit is set), jump to the false branch. | 1965 // If we got a NaN (overflow bit is set), jump to the false branch. |
| 1973 __ B(vs, false_label); | 1966 __ B(vs, false_label); |
| 1974 __ B(eq, false_label); | 1967 __ B(eq, false_label); |
| 1975 __ B(true_label); | 1968 __ B(true_label); |
| 1976 __ Bind(¬_heap_number); | 1969 __ Bind(¬_heap_number); |
| 1977 } | 1970 } |
| 1978 | 1971 |
| 1979 if (!expected.IsGeneric()) { | 1972 if (!expected.IsGeneric()) { |
| 1980 // We've seen something for the first time -> deopt. | 1973 // We've seen something for the first time -> deopt. |
| 1981 // This can only happen if we are not generic already. | 1974 // This can only happen if we are not generic already. |
| 1982 Deoptimize(instr->environment()); | 1975 Deoptimize(instr); |
| 1983 } | 1976 } |
| 1984 } | 1977 } |
| 1985 } | 1978 } |
| 1986 } | 1979 } |
| 1987 | 1980 |
| 1988 | 1981 |
| 1989 void LCodeGen::CallKnownFunction(Handle<JSFunction> function, | 1982 void LCodeGen::CallKnownFunction(Handle<JSFunction> function, |
| 1990 int formal_parameter_count, | 1983 int formal_parameter_count, |
| 1991 int arity, | 1984 int arity, |
| 1992 LInstruction* instr, | 1985 LInstruction* instr, |
| (...skipping 164 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 2157 Register temp = ToRegister(instr->temp()); | 2150 Register temp = ToRegister(instr->temp()); |
| 2158 { | 2151 { |
| 2159 PushSafepointRegistersScope scope(this); | 2152 PushSafepointRegistersScope scope(this); |
| 2160 __ Push(object); | 2153 __ Push(object); |
| 2161 __ Mov(cp, 0); | 2154 __ Mov(cp, 0); |
| 2162 __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance); | 2155 __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance); |
| 2163 RecordSafepointWithRegisters( | 2156 RecordSafepointWithRegisters( |
| 2164 instr->pointer_map(), 1, Safepoint::kNoLazyDeopt); | 2157 instr->pointer_map(), 1, Safepoint::kNoLazyDeopt); |
| 2165 __ StoreToSafepointRegisterSlot(x0, temp); | 2158 __ StoreToSafepointRegisterSlot(x0, temp); |
| 2166 } | 2159 } |
| 2167 DeoptimizeIfSmi(temp, instr->environment()); | 2160 DeoptimizeIfSmi(temp, instr); |
| 2168 } | 2161 } |
| 2169 | 2162 |
| 2170 | 2163 |
| 2171 void LCodeGen::DoCheckMaps(LCheckMaps* instr) { | 2164 void LCodeGen::DoCheckMaps(LCheckMaps* instr) { |
| 2172 class DeferredCheckMaps: public LDeferredCode { | 2165 class DeferredCheckMaps: public LDeferredCode { |
| 2173 public: | 2166 public: |
| 2174 DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object) | 2167 DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object) |
| 2175 : LDeferredCode(codegen), instr_(instr), object_(object) { | 2168 : LDeferredCode(codegen), instr_(instr), object_(object) { |
| 2176 SetExit(check_maps()); | 2169 SetExit(check_maps()); |
| 2177 } | 2170 } |
| (...skipping 34 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 2212 __ CompareMap(map_reg, map); | 2205 __ CompareMap(map_reg, map); |
| 2213 __ B(eq, &success); | 2206 __ B(eq, &success); |
| 2214 } | 2207 } |
| 2215 Handle<Map> map = maps->at(maps->size() - 1).handle(); | 2208 Handle<Map> map = maps->at(maps->size() - 1).handle(); |
| 2216 __ CompareMap(map_reg, map); | 2209 __ CompareMap(map_reg, map); |
| 2217 | 2210 |
| 2218 // We didn't match a map. | 2211 // We didn't match a map. |
| 2219 if (instr->hydrogen()->HasMigrationTarget()) { | 2212 if (instr->hydrogen()->HasMigrationTarget()) { |
| 2220 __ B(ne, deferred->entry()); | 2213 __ B(ne, deferred->entry()); |
| 2221 } else { | 2214 } else { |
| 2222 DeoptimizeIf(ne, instr->environment()); | 2215 DeoptimizeIf(ne, instr); |
| 2223 } | 2216 } |
| 2224 | 2217 |
| 2225 __ Bind(&success); | 2218 __ Bind(&success); |
| 2226 } | 2219 } |
| 2227 | 2220 |
| 2228 | 2221 |
| 2229 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) { | 2222 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) { |
| 2230 if (!instr->hydrogen()->value()->type().IsHeapObject()) { | 2223 if (!instr->hydrogen()->value()->type().IsHeapObject()) { |
| 2231 DeoptimizeIfSmi(ToRegister(instr->value()), instr->environment()); | 2224 DeoptimizeIfSmi(ToRegister(instr->value()), instr); |
| 2232 } | 2225 } |
| 2233 } | 2226 } |
| 2234 | 2227 |
| 2235 | 2228 |
| 2236 void LCodeGen::DoCheckSmi(LCheckSmi* instr) { | 2229 void LCodeGen::DoCheckSmi(LCheckSmi* instr) { |
| 2237 Register value = ToRegister(instr->value()); | 2230 Register value = ToRegister(instr->value()); |
| 2238 DCHECK(!instr->result() || ToRegister(instr->result()).Is(value)); | 2231 DCHECK(!instr->result() || ToRegister(instr->result()).Is(value)); |
| 2239 DeoptimizeIfNotSmi(value, instr->environment()); | 2232 DeoptimizeIfNotSmi(value, instr); |
| 2240 } | 2233 } |
| 2241 | 2234 |
| 2242 | 2235 |
| 2243 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) { | 2236 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) { |
| 2244 Register input = ToRegister(instr->value()); | 2237 Register input = ToRegister(instr->value()); |
| 2245 Register scratch = ToRegister(instr->temp()); | 2238 Register scratch = ToRegister(instr->temp()); |
| 2246 | 2239 |
| 2247 __ Ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); | 2240 __ Ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); |
| 2248 __ Ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); | 2241 __ Ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); |
| 2249 | 2242 |
| 2250 if (instr->hydrogen()->is_interval_check()) { | 2243 if (instr->hydrogen()->is_interval_check()) { |
| 2251 InstanceType first, last; | 2244 InstanceType first, last; |
| 2252 instr->hydrogen()->GetCheckInterval(&first, &last); | 2245 instr->hydrogen()->GetCheckInterval(&first, &last); |
| 2253 | 2246 |
| 2254 __ Cmp(scratch, first); | 2247 __ Cmp(scratch, first); |
| 2255 if (first == last) { | 2248 if (first == last) { |
| 2256 // If there is only one type in the interval check for equality. | 2249 // If there is only one type in the interval check for equality. |
| 2257 DeoptimizeIf(ne, instr->environment()); | 2250 DeoptimizeIf(ne, instr); |
| 2258 } else if (last == LAST_TYPE) { | 2251 } else if (last == LAST_TYPE) { |
| 2259 // We don't need to compare with the higher bound of the interval. | 2252 // We don't need to compare with the higher bound of the interval. |
| 2260 DeoptimizeIf(lo, instr->environment()); | 2253 DeoptimizeIf(lo, instr); |
| 2261 } else { | 2254 } else { |
| 2262 // If we are below the lower bound, set the C flag and clear the Z flag | 2255 // If we are below the lower bound, set the C flag and clear the Z flag |
| 2263 // to force a deopt. | 2256 // to force a deopt. |
| 2264 __ Ccmp(scratch, last, CFlag, hs); | 2257 __ Ccmp(scratch, last, CFlag, hs); |
| 2265 DeoptimizeIf(hi, instr->environment()); | 2258 DeoptimizeIf(hi, instr); |
| 2266 } | 2259 } |
| 2267 } else { | 2260 } else { |
| 2268 uint8_t mask; | 2261 uint8_t mask; |
| 2269 uint8_t tag; | 2262 uint8_t tag; |
| 2270 instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag); | 2263 instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag); |
| 2271 | 2264 |
| 2272 if (base::bits::IsPowerOfTwo32(mask)) { | 2265 if (base::bits::IsPowerOfTwo32(mask)) { |
| 2273 DCHECK((tag == 0) || (tag == mask)); | 2266 DCHECK((tag == 0) || (tag == mask)); |
| 2274 if (tag == 0) { | 2267 if (tag == 0) { |
| 2275 DeoptimizeIfBitSet(scratch, MaskToBit(mask), instr->environment()); | 2268 DeoptimizeIfBitSet(scratch, MaskToBit(mask), instr); |
| 2276 } else { | 2269 } else { |
| 2277 DeoptimizeIfBitClear(scratch, MaskToBit(mask), instr->environment()); | 2270 DeoptimizeIfBitClear(scratch, MaskToBit(mask), instr); |
| 2278 } | 2271 } |
| 2279 } else { | 2272 } else { |
| 2280 if (tag == 0) { | 2273 if (tag == 0) { |
| 2281 __ Tst(scratch, mask); | 2274 __ Tst(scratch, mask); |
| 2282 } else { | 2275 } else { |
| 2283 __ And(scratch, scratch, mask); | 2276 __ And(scratch, scratch, mask); |
| 2284 __ Cmp(scratch, tag); | 2277 __ Cmp(scratch, tag); |
| 2285 } | 2278 } |
| 2286 DeoptimizeIf(ne, instr->environment()); | 2279 DeoptimizeIf(ne, instr); |
| 2287 } | 2280 } |
| 2288 } | 2281 } |
| 2289 } | 2282 } |
| 2290 | 2283 |
| 2291 | 2284 |
| 2292 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { | 2285 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { |
| 2293 DoubleRegister input = ToDoubleRegister(instr->unclamped()); | 2286 DoubleRegister input = ToDoubleRegister(instr->unclamped()); |
| 2294 Register result = ToRegister32(instr->result()); | 2287 Register result = ToRegister32(instr->result()); |
| 2295 __ ClampDoubleToUint8(result, input, double_scratch()); | 2288 __ ClampDoubleToUint8(result, input, double_scratch()); |
| 2296 } | 2289 } |
| (...skipping 20 matching lines...) Expand all Loading... |
| 2317 __ B(&done); | 2310 __ B(&done); |
| 2318 | 2311 |
| 2319 __ Bind(&is_not_smi); | 2312 __ Bind(&is_not_smi); |
| 2320 | 2313 |
| 2321 // Check for heap number. | 2314 // Check for heap number. |
| 2322 Label is_heap_number; | 2315 Label is_heap_number; |
| 2323 __ Ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); | 2316 __ Ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); |
| 2324 __ JumpIfRoot(scratch, Heap::kHeapNumberMapRootIndex, &is_heap_number); | 2317 __ JumpIfRoot(scratch, Heap::kHeapNumberMapRootIndex, &is_heap_number); |
| 2325 | 2318 |
| 2326 // Check for undefined. Undefined is coverted to zero for clamping conversion. | 2319 // Check for undefined. Undefined is coverted to zero for clamping conversion. |
| 2327 DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, | 2320 DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr); |
| 2328 instr->environment()); | |
| 2329 __ Mov(result, 0); | 2321 __ Mov(result, 0); |
| 2330 __ B(&done); | 2322 __ B(&done); |
| 2331 | 2323 |
| 2332 // Heap number case. | 2324 // Heap number case. |
| 2333 __ Bind(&is_heap_number); | 2325 __ Bind(&is_heap_number); |
| 2334 DoubleRegister dbl_scratch = double_scratch(); | 2326 DoubleRegister dbl_scratch = double_scratch(); |
| 2335 DoubleRegister dbl_scratch2 = ToDoubleRegister(instr->temp2()); | 2327 DoubleRegister dbl_scratch2 = ToDoubleRegister(instr->temp2()); |
| 2336 __ Ldr(dbl_scratch, FieldMemOperand(input, HeapNumber::kValueOffset)); | 2328 __ Ldr(dbl_scratch, FieldMemOperand(input, HeapNumber::kValueOffset)); |
| 2337 __ ClampDoubleToUint8(result, dbl_scratch, dbl_scratch2); | 2329 __ ClampDoubleToUint8(result, dbl_scratch, dbl_scratch2); |
| 2338 | 2330 |
| (...skipping 285 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 2624 if (isolate()->heap()->InNewSpace(*object)) { | 2616 if (isolate()->heap()->InNewSpace(*object)) { |
| 2625 UseScratchRegisterScope temps(masm()); | 2617 UseScratchRegisterScope temps(masm()); |
| 2626 Register temp = temps.AcquireX(); | 2618 Register temp = temps.AcquireX(); |
| 2627 Handle<Cell> cell = isolate()->factory()->NewCell(object); | 2619 Handle<Cell> cell = isolate()->factory()->NewCell(object); |
| 2628 __ Mov(temp, Operand(Handle<Object>(cell))); | 2620 __ Mov(temp, Operand(Handle<Object>(cell))); |
| 2629 __ Ldr(temp, FieldMemOperand(temp, Cell::kValueOffset)); | 2621 __ Ldr(temp, FieldMemOperand(temp, Cell::kValueOffset)); |
| 2630 __ Cmp(reg, temp); | 2622 __ Cmp(reg, temp); |
| 2631 } else { | 2623 } else { |
| 2632 __ Cmp(reg, Operand(object)); | 2624 __ Cmp(reg, Operand(object)); |
| 2633 } | 2625 } |
| 2634 DeoptimizeIf(ne, instr->environment()); | 2626 DeoptimizeIf(ne, instr); |
| 2635 } | 2627 } |
| 2636 | 2628 |
| 2637 | 2629 |
| 2638 void LCodeGen::DoLazyBailout(LLazyBailout* instr) { | 2630 void LCodeGen::DoLazyBailout(LLazyBailout* instr) { |
| 2639 last_lazy_deopt_pc_ = masm()->pc_offset(); | 2631 last_lazy_deopt_pc_ = masm()->pc_offset(); |
| 2640 DCHECK(instr->HasEnvironment()); | 2632 DCHECK(instr->HasEnvironment()); |
| 2641 LEnvironment* env = instr->environment(); | 2633 LEnvironment* env = instr->environment(); |
| 2642 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt); | 2634 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt); |
| 2643 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index()); | 2635 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index()); |
| 2644 } | 2636 } |
| 2645 | 2637 |
| 2646 | 2638 |
| 2647 void LCodeGen::DoDateField(LDateField* instr) { | 2639 void LCodeGen::DoDateField(LDateField* instr) { |
| 2648 Register object = ToRegister(instr->date()); | 2640 Register object = ToRegister(instr->date()); |
| 2649 Register result = ToRegister(instr->result()); | 2641 Register result = ToRegister(instr->result()); |
| 2650 Register temp1 = x10; | 2642 Register temp1 = x10; |
| 2651 Register temp2 = x11; | 2643 Register temp2 = x11; |
| 2652 Smi* index = instr->index(); | 2644 Smi* index = instr->index(); |
| 2653 Label runtime, done; | 2645 Label runtime, done; |
| 2654 | 2646 |
| 2655 DCHECK(object.is(result) && object.Is(x0)); | 2647 DCHECK(object.is(result) && object.Is(x0)); |
| 2656 DCHECK(instr->IsMarkedAsCall()); | 2648 DCHECK(instr->IsMarkedAsCall()); |
| 2657 | 2649 |
| 2658 DeoptimizeIfSmi(object, instr->environment()); | 2650 DeoptimizeIfSmi(object, instr); |
| 2659 __ CompareObjectType(object, temp1, temp1, JS_DATE_TYPE); | 2651 __ CompareObjectType(object, temp1, temp1, JS_DATE_TYPE); |
| 2660 DeoptimizeIf(ne, instr->environment()); | 2652 DeoptimizeIf(ne, instr); |
| 2661 | 2653 |
| 2662 if (index->value() == 0) { | 2654 if (index->value() == 0) { |
| 2663 __ Ldr(result, FieldMemOperand(object, JSDate::kValueOffset)); | 2655 __ Ldr(result, FieldMemOperand(object, JSDate::kValueOffset)); |
| 2664 } else { | 2656 } else { |
| 2665 if (index->value() < JSDate::kFirstUncachedField) { | 2657 if (index->value() < JSDate::kFirstUncachedField) { |
| 2666 ExternalReference stamp = ExternalReference::date_cache_stamp(isolate()); | 2658 ExternalReference stamp = ExternalReference::date_cache_stamp(isolate()); |
| 2667 __ Mov(temp1, Operand(stamp)); | 2659 __ Mov(temp1, Operand(stamp)); |
| 2668 __ Ldr(temp1, MemOperand(temp1)); | 2660 __ Ldr(temp1, MemOperand(temp1)); |
| 2669 __ Ldr(temp2, FieldMemOperand(object, JSDate::kCacheStampOffset)); | 2661 __ Ldr(temp2, FieldMemOperand(object, JSDate::kCacheStampOffset)); |
| 2670 __ Cmp(temp1, temp2); | 2662 __ Cmp(temp1, temp2); |
| (...skipping 16 matching lines...) Expand all Loading... |
| 2687 Deoptimizer::BailoutType type = instr->hydrogen()->type(); | 2679 Deoptimizer::BailoutType type = instr->hydrogen()->type(); |
| 2688 // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the | 2680 // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the |
| 2689 // needed return address), even though the implementation of LAZY and EAGER is | 2681 // needed return address), even though the implementation of LAZY and EAGER is |
| 2690 // now identical. When LAZY is eventually completely folded into EAGER, remove | 2682 // now identical. When LAZY is eventually completely folded into EAGER, remove |
| 2691 // the special case below. | 2683 // the special case below. |
| 2692 if (info()->IsStub() && (type == Deoptimizer::EAGER)) { | 2684 if (info()->IsStub() && (type == Deoptimizer::EAGER)) { |
| 2693 type = Deoptimizer::LAZY; | 2685 type = Deoptimizer::LAZY; |
| 2694 } | 2686 } |
| 2695 | 2687 |
| 2696 Comment(";;; deoptimize: %s", instr->hydrogen()->reason()); | 2688 Comment(";;; deoptimize: %s", instr->hydrogen()->reason()); |
| 2697 Deoptimize(instr->environment(), &type); | 2689 Deoptimize(instr, &type); |
| 2698 } | 2690 } |
| 2699 | 2691 |
| 2700 | 2692 |
| 2701 void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) { | 2693 void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) { |
| 2702 Register dividend = ToRegister32(instr->dividend()); | 2694 Register dividend = ToRegister32(instr->dividend()); |
| 2703 int32_t divisor = instr->divisor(); | 2695 int32_t divisor = instr->divisor(); |
| 2704 Register result = ToRegister32(instr->result()); | 2696 Register result = ToRegister32(instr->result()); |
| 2705 DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor))); | 2697 DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor))); |
| 2706 DCHECK(!result.is(dividend)); | 2698 DCHECK(!result.is(dividend)); |
| 2707 | 2699 |
| 2708 // Check for (0 / -x) that will produce negative zero. | 2700 // Check for (0 / -x) that will produce negative zero. |
| 2709 HDiv* hdiv = instr->hydrogen(); | 2701 HDiv* hdiv = instr->hydrogen(); |
| 2710 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { | 2702 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { |
| 2711 DeoptimizeIfZero(dividend, instr->environment()); | 2703 DeoptimizeIfZero(dividend, instr); |
| 2712 } | 2704 } |
| 2713 // Check for (kMinInt / -1). | 2705 // Check for (kMinInt / -1). |
| 2714 if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) { | 2706 if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) { |
| 2715 // Test dividend for kMinInt by subtracting one (cmp) and checking for | 2707 // Test dividend for kMinInt by subtracting one (cmp) and checking for |
| 2716 // overflow. | 2708 // overflow. |
| 2717 __ Cmp(dividend, 1); | 2709 __ Cmp(dividend, 1); |
| 2718 DeoptimizeIf(vs, instr->environment()); | 2710 DeoptimizeIf(vs, instr); |
| 2719 } | 2711 } |
| 2720 // Deoptimize if remainder will not be 0. | 2712 // Deoptimize if remainder will not be 0. |
| 2721 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && | 2713 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && |
| 2722 divisor != 1 && divisor != -1) { | 2714 divisor != 1 && divisor != -1) { |
| 2723 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1); | 2715 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1); |
| 2724 __ Tst(dividend, mask); | 2716 __ Tst(dividend, mask); |
| 2725 DeoptimizeIf(ne, instr->environment()); | 2717 DeoptimizeIf(ne, instr); |
| 2726 } | 2718 } |
| 2727 | 2719 |
| 2728 if (divisor == -1) { // Nice shortcut, not needed for correctness. | 2720 if (divisor == -1) { // Nice shortcut, not needed for correctness. |
| 2729 __ Neg(result, dividend); | 2721 __ Neg(result, dividend); |
| 2730 return; | 2722 return; |
| 2731 } | 2723 } |
| 2732 int32_t shift = WhichPowerOf2Abs(divisor); | 2724 int32_t shift = WhichPowerOf2Abs(divisor); |
| 2733 if (shift == 0) { | 2725 if (shift == 0) { |
| 2734 __ Mov(result, dividend); | 2726 __ Mov(result, dividend); |
| 2735 } else if (shift == 1) { | 2727 } else if (shift == 1) { |
| 2736 __ Add(result, dividend, Operand(dividend, LSR, 31)); | 2728 __ Add(result, dividend, Operand(dividend, LSR, 31)); |
| 2737 } else { | 2729 } else { |
| 2738 __ Mov(result, Operand(dividend, ASR, 31)); | 2730 __ Mov(result, Operand(dividend, ASR, 31)); |
| 2739 __ Add(result, dividend, Operand(result, LSR, 32 - shift)); | 2731 __ Add(result, dividend, Operand(result, LSR, 32 - shift)); |
| 2740 } | 2732 } |
| 2741 if (shift > 0) __ Mov(result, Operand(result, ASR, shift)); | 2733 if (shift > 0) __ Mov(result, Operand(result, ASR, shift)); |
| 2742 if (divisor < 0) __ Neg(result, result); | 2734 if (divisor < 0) __ Neg(result, result); |
| 2743 } | 2735 } |
| 2744 | 2736 |
| 2745 | 2737 |
| 2746 void LCodeGen::DoDivByConstI(LDivByConstI* instr) { | 2738 void LCodeGen::DoDivByConstI(LDivByConstI* instr) { |
| 2747 Register dividend = ToRegister32(instr->dividend()); | 2739 Register dividend = ToRegister32(instr->dividend()); |
| 2748 int32_t divisor = instr->divisor(); | 2740 int32_t divisor = instr->divisor(); |
| 2749 Register result = ToRegister32(instr->result()); | 2741 Register result = ToRegister32(instr->result()); |
| 2750 DCHECK(!AreAliased(dividend, result)); | 2742 DCHECK(!AreAliased(dividend, result)); |
| 2751 | 2743 |
| 2752 if (divisor == 0) { | 2744 if (divisor == 0) { |
| 2753 Deoptimize(instr->environment()); | 2745 Deoptimize(instr); |
| 2754 return; | 2746 return; |
| 2755 } | 2747 } |
| 2756 | 2748 |
| 2757 // Check for (0 / -x) that will produce negative zero. | 2749 // Check for (0 / -x) that will produce negative zero. |
| 2758 HDiv* hdiv = instr->hydrogen(); | 2750 HDiv* hdiv = instr->hydrogen(); |
| 2759 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { | 2751 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { |
| 2760 DeoptimizeIfZero(dividend, instr->environment()); | 2752 DeoptimizeIfZero(dividend, instr); |
| 2761 } | 2753 } |
| 2762 | 2754 |
| 2763 __ TruncatingDiv(result, dividend, Abs(divisor)); | 2755 __ TruncatingDiv(result, dividend, Abs(divisor)); |
| 2764 if (divisor < 0) __ Neg(result, result); | 2756 if (divisor < 0) __ Neg(result, result); |
| 2765 | 2757 |
| 2766 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) { | 2758 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) { |
| 2767 Register temp = ToRegister32(instr->temp()); | 2759 Register temp = ToRegister32(instr->temp()); |
| 2768 DCHECK(!AreAliased(dividend, result, temp)); | 2760 DCHECK(!AreAliased(dividend, result, temp)); |
| 2769 __ Sxtw(dividend.X(), dividend); | 2761 __ Sxtw(dividend.X(), dividend); |
| 2770 __ Mov(temp, divisor); | 2762 __ Mov(temp, divisor); |
| 2771 __ Smsubl(temp.X(), result, temp, dividend.X()); | 2763 __ Smsubl(temp.X(), result, temp, dividend.X()); |
| 2772 DeoptimizeIfNotZero(temp, instr->environment()); | 2764 DeoptimizeIfNotZero(temp, instr); |
| 2773 } | 2765 } |
| 2774 } | 2766 } |
| 2775 | 2767 |
| 2776 | 2768 |
| 2777 // TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI. | 2769 // TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI. |
| 2778 void LCodeGen::DoDivI(LDivI* instr) { | 2770 void LCodeGen::DoDivI(LDivI* instr) { |
| 2779 HBinaryOperation* hdiv = instr->hydrogen(); | 2771 HBinaryOperation* hdiv = instr->hydrogen(); |
| 2780 Register dividend = ToRegister32(instr->dividend()); | 2772 Register dividend = ToRegister32(instr->dividend()); |
| 2781 Register divisor = ToRegister32(instr->divisor()); | 2773 Register divisor = ToRegister32(instr->divisor()); |
| 2782 Register result = ToRegister32(instr->result()); | 2774 Register result = ToRegister32(instr->result()); |
| 2783 | 2775 |
| 2784 // Issue the division first, and then check for any deopt cases whilst the | 2776 // Issue the division first, and then check for any deopt cases whilst the |
| 2785 // result is computed. | 2777 // result is computed. |
| 2786 __ Sdiv(result, dividend, divisor); | 2778 __ Sdiv(result, dividend, divisor); |
| 2787 | 2779 |
| 2788 if (hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { | 2780 if (hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { |
| 2789 DCHECK_EQ(NULL, instr->temp()); | 2781 DCHECK_EQ(NULL, instr->temp()); |
| 2790 return; | 2782 return; |
| 2791 } | 2783 } |
| 2792 | 2784 |
| 2793 // Check for x / 0. | 2785 // Check for x / 0. |
| 2794 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { | 2786 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { |
| 2795 DeoptimizeIfZero(divisor, instr->environment()); | 2787 DeoptimizeIfZero(divisor, instr); |
| 2796 } | 2788 } |
| 2797 | 2789 |
| 2798 // Check for (0 / -x) as that will produce negative zero. | 2790 // Check for (0 / -x) as that will produce negative zero. |
| 2799 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { | 2791 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 2800 __ Cmp(divisor, 0); | 2792 __ Cmp(divisor, 0); |
| 2801 | 2793 |
| 2802 // If the divisor < 0 (mi), compare the dividend, and deopt if it is | 2794 // If the divisor < 0 (mi), compare the dividend, and deopt if it is |
| 2803 // zero, ie. zero dividend with negative divisor deopts. | 2795 // zero, ie. zero dividend with negative divisor deopts. |
| 2804 // If the divisor >= 0 (pl, the opposite of mi) set the flags to | 2796 // If the divisor >= 0 (pl, the opposite of mi) set the flags to |
| 2805 // condition ne, so we don't deopt, ie. positive divisor doesn't deopt. | 2797 // condition ne, so we don't deopt, ie. positive divisor doesn't deopt. |
| 2806 __ Ccmp(dividend, 0, NoFlag, mi); | 2798 __ Ccmp(dividend, 0, NoFlag, mi); |
| 2807 DeoptimizeIf(eq, instr->environment()); | 2799 DeoptimizeIf(eq, instr); |
| 2808 } | 2800 } |
| 2809 | 2801 |
| 2810 // Check for (kMinInt / -1). | 2802 // Check for (kMinInt / -1). |
| 2811 if (hdiv->CheckFlag(HValue::kCanOverflow)) { | 2803 if (hdiv->CheckFlag(HValue::kCanOverflow)) { |
| 2812 // Test dividend for kMinInt by subtracting one (cmp) and checking for | 2804 // Test dividend for kMinInt by subtracting one (cmp) and checking for |
| 2813 // overflow. | 2805 // overflow. |
| 2814 __ Cmp(dividend, 1); | 2806 __ Cmp(dividend, 1); |
| 2815 // If overflow is set, ie. dividend = kMinInt, compare the divisor with | 2807 // If overflow is set, ie. dividend = kMinInt, compare the divisor with |
| 2816 // -1. If overflow is clear, set the flags for condition ne, as the | 2808 // -1. If overflow is clear, set the flags for condition ne, as the |
| 2817 // dividend isn't -1, and thus we shouldn't deopt. | 2809 // dividend isn't -1, and thus we shouldn't deopt. |
| 2818 __ Ccmp(divisor, -1, NoFlag, vs); | 2810 __ Ccmp(divisor, -1, NoFlag, vs); |
| 2819 DeoptimizeIf(eq, instr->environment()); | 2811 DeoptimizeIf(eq, instr); |
| 2820 } | 2812 } |
| 2821 | 2813 |
| 2822 // Compute remainder and deopt if it's not zero. | 2814 // Compute remainder and deopt if it's not zero. |
| 2823 Register remainder = ToRegister32(instr->temp()); | 2815 Register remainder = ToRegister32(instr->temp()); |
| 2824 __ Msub(remainder, result, divisor, dividend); | 2816 __ Msub(remainder, result, divisor, dividend); |
| 2825 DeoptimizeIfNotZero(remainder, instr->environment()); | 2817 DeoptimizeIfNotZero(remainder, instr); |
| 2826 } | 2818 } |
| 2827 | 2819 |
| 2828 | 2820 |
| 2829 void LCodeGen::DoDoubleToIntOrSmi(LDoubleToIntOrSmi* instr) { | 2821 void LCodeGen::DoDoubleToIntOrSmi(LDoubleToIntOrSmi* instr) { |
| 2830 DoubleRegister input = ToDoubleRegister(instr->value()); | 2822 DoubleRegister input = ToDoubleRegister(instr->value()); |
| 2831 Register result = ToRegister32(instr->result()); | 2823 Register result = ToRegister32(instr->result()); |
| 2832 | 2824 |
| 2833 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 2825 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 2834 DeoptimizeIfMinusZero(input, instr->environment()); | 2826 DeoptimizeIfMinusZero(input, instr); |
| 2835 } | 2827 } |
| 2836 | 2828 |
| 2837 __ TryRepresentDoubleAsInt32(result, input, double_scratch()); | 2829 __ TryRepresentDoubleAsInt32(result, input, double_scratch()); |
| 2838 DeoptimizeIf(ne, instr->environment()); | 2830 DeoptimizeIf(ne, instr); |
| 2839 | 2831 |
| 2840 if (instr->tag_result()) { | 2832 if (instr->tag_result()) { |
| 2841 __ SmiTag(result.X()); | 2833 __ SmiTag(result.X()); |
| 2842 } | 2834 } |
| 2843 } | 2835 } |
| 2844 | 2836 |
| 2845 | 2837 |
| 2846 void LCodeGen::DoDrop(LDrop* instr) { | 2838 void LCodeGen::DoDrop(LDrop* instr) { |
| 2847 __ Drop(instr->count()); | 2839 __ Drop(instr->count()); |
| 2848 } | 2840 } |
| (...skipping 40 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 2889 __ EnumLengthUntagged(result, map); | 2881 __ EnumLengthUntagged(result, map); |
| 2890 __ Cbnz(result, &load_cache); | 2882 __ Cbnz(result, &load_cache); |
| 2891 | 2883 |
| 2892 __ Mov(result, Operand(isolate()->factory()->empty_fixed_array())); | 2884 __ Mov(result, Operand(isolate()->factory()->empty_fixed_array())); |
| 2893 __ B(&done); | 2885 __ B(&done); |
| 2894 | 2886 |
| 2895 __ Bind(&load_cache); | 2887 __ Bind(&load_cache); |
| 2896 __ LoadInstanceDescriptors(map, result); | 2888 __ LoadInstanceDescriptors(map, result); |
| 2897 __ Ldr(result, FieldMemOperand(result, DescriptorArray::kEnumCacheOffset)); | 2889 __ Ldr(result, FieldMemOperand(result, DescriptorArray::kEnumCacheOffset)); |
| 2898 __ Ldr(result, FieldMemOperand(result, FixedArray::SizeFor(instr->idx()))); | 2890 __ Ldr(result, FieldMemOperand(result, FixedArray::SizeFor(instr->idx()))); |
| 2899 DeoptimizeIfZero(result, instr->environment()); | 2891 DeoptimizeIfZero(result, instr); |
| 2900 | 2892 |
| 2901 __ Bind(&done); | 2893 __ Bind(&done); |
| 2902 } | 2894 } |
| 2903 | 2895 |
| 2904 | 2896 |
| 2905 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) { | 2897 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) { |
| 2906 Register object = ToRegister(instr->object()); | 2898 Register object = ToRegister(instr->object()); |
| 2907 Register null_value = x5; | 2899 Register null_value = x5; |
| 2908 | 2900 |
| 2909 DCHECK(instr->IsMarkedAsCall()); | 2901 DCHECK(instr->IsMarkedAsCall()); |
| 2910 DCHECK(object.Is(x0)); | 2902 DCHECK(object.Is(x0)); |
| 2911 | 2903 |
| 2912 DeoptimizeIfRoot(object, Heap::kUndefinedValueRootIndex, | 2904 DeoptimizeIfRoot(object, Heap::kUndefinedValueRootIndex, instr); |
| 2913 instr->environment()); | |
| 2914 | 2905 |
| 2915 __ LoadRoot(null_value, Heap::kNullValueRootIndex); | 2906 __ LoadRoot(null_value, Heap::kNullValueRootIndex); |
| 2916 __ Cmp(object, null_value); | 2907 __ Cmp(object, null_value); |
| 2917 DeoptimizeIf(eq, instr->environment()); | 2908 DeoptimizeIf(eq, instr); |
| 2918 | 2909 |
| 2919 DeoptimizeIfSmi(object, instr->environment()); | 2910 DeoptimizeIfSmi(object, instr); |
| 2920 | 2911 |
| 2921 STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE); | 2912 STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE); |
| 2922 __ CompareObjectType(object, x1, x1, LAST_JS_PROXY_TYPE); | 2913 __ CompareObjectType(object, x1, x1, LAST_JS_PROXY_TYPE); |
| 2923 DeoptimizeIf(le, instr->environment()); | 2914 DeoptimizeIf(le, instr); |
| 2924 | 2915 |
| 2925 Label use_cache, call_runtime; | 2916 Label use_cache, call_runtime; |
| 2926 __ CheckEnumCache(object, null_value, x1, x2, x3, x4, &call_runtime); | 2917 __ CheckEnumCache(object, null_value, x1, x2, x3, x4, &call_runtime); |
| 2927 | 2918 |
| 2928 __ Ldr(object, FieldMemOperand(object, HeapObject::kMapOffset)); | 2919 __ Ldr(object, FieldMemOperand(object, HeapObject::kMapOffset)); |
| 2929 __ B(&use_cache); | 2920 __ B(&use_cache); |
| 2930 | 2921 |
| 2931 // Get the set of properties to enumerate. | 2922 // Get the set of properties to enumerate. |
| 2932 __ Bind(&call_runtime); | 2923 __ Bind(&call_runtime); |
| 2933 __ Push(object); | 2924 __ Push(object); |
| 2934 CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr); | 2925 CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr); |
| 2935 | 2926 |
| 2936 __ Ldr(x1, FieldMemOperand(object, HeapObject::kMapOffset)); | 2927 __ Ldr(x1, FieldMemOperand(object, HeapObject::kMapOffset)); |
| 2937 DeoptimizeIfNotRoot(x1, Heap::kMetaMapRootIndex, instr->environment()); | 2928 DeoptimizeIfNotRoot(x1, Heap::kMetaMapRootIndex, instr); |
| 2938 | 2929 |
| 2939 __ Bind(&use_cache); | 2930 __ Bind(&use_cache); |
| 2940 } | 2931 } |
| 2941 | 2932 |
| 2942 | 2933 |
| 2943 void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) { | 2934 void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) { |
| 2944 Register input = ToRegister(instr->value()); | 2935 Register input = ToRegister(instr->value()); |
| 2945 Register result = ToRegister(instr->result()); | 2936 Register result = ToRegister(instr->result()); |
| 2946 | 2937 |
| 2947 __ AssertString(input); | 2938 __ AssertString(input); |
| (...skipping 372 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 3320 DoGap(label); | 3311 DoGap(label); |
| 3321 } | 3312 } |
| 3322 | 3313 |
| 3323 | 3314 |
| 3324 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) { | 3315 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) { |
| 3325 Register context = ToRegister(instr->context()); | 3316 Register context = ToRegister(instr->context()); |
| 3326 Register result = ToRegister(instr->result()); | 3317 Register result = ToRegister(instr->result()); |
| 3327 __ Ldr(result, ContextMemOperand(context, instr->slot_index())); | 3318 __ Ldr(result, ContextMemOperand(context, instr->slot_index())); |
| 3328 if (instr->hydrogen()->RequiresHoleCheck()) { | 3319 if (instr->hydrogen()->RequiresHoleCheck()) { |
| 3329 if (instr->hydrogen()->DeoptimizesOnHole()) { | 3320 if (instr->hydrogen()->DeoptimizesOnHole()) { |
| 3330 DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, | 3321 DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr); |
| 3331 instr->environment()); | |
| 3332 } else { | 3322 } else { |
| 3333 Label not_the_hole; | 3323 Label not_the_hole; |
| 3334 __ JumpIfNotRoot(result, Heap::kTheHoleValueRootIndex, ¬_the_hole); | 3324 __ JumpIfNotRoot(result, Heap::kTheHoleValueRootIndex, ¬_the_hole); |
| 3335 __ LoadRoot(result, Heap::kUndefinedValueRootIndex); | 3325 __ LoadRoot(result, Heap::kUndefinedValueRootIndex); |
| 3336 __ Bind(¬_the_hole); | 3326 __ Bind(¬_the_hole); |
| 3337 } | 3327 } |
| 3338 } | 3328 } |
| 3339 } | 3329 } |
| 3340 | 3330 |
| 3341 | 3331 |
| 3342 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) { | 3332 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) { |
| 3343 Register function = ToRegister(instr->function()); | 3333 Register function = ToRegister(instr->function()); |
| 3344 Register result = ToRegister(instr->result()); | 3334 Register result = ToRegister(instr->result()); |
| 3345 Register temp = ToRegister(instr->temp()); | 3335 Register temp = ToRegister(instr->temp()); |
| 3346 | 3336 |
| 3347 // Get the prototype or initial map from the function. | 3337 // Get the prototype or initial map from the function. |
| 3348 __ Ldr(result, FieldMemOperand(function, | 3338 __ Ldr(result, FieldMemOperand(function, |
| 3349 JSFunction::kPrototypeOrInitialMapOffset)); | 3339 JSFunction::kPrototypeOrInitialMapOffset)); |
| 3350 | 3340 |
| 3351 // Check that the function has a prototype or an initial map. | 3341 // Check that the function has a prototype or an initial map. |
| 3352 DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, | 3342 DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr); |
| 3353 instr->environment()); | |
| 3354 | 3343 |
| 3355 // If the function does not have an initial map, we're done. | 3344 // If the function does not have an initial map, we're done. |
| 3356 Label done; | 3345 Label done; |
| 3357 __ CompareObjectType(result, temp, temp, MAP_TYPE); | 3346 __ CompareObjectType(result, temp, temp, MAP_TYPE); |
| 3358 __ B(ne, &done); | 3347 __ B(ne, &done); |
| 3359 | 3348 |
| 3360 // Get the prototype from the initial map. | 3349 // Get the prototype from the initial map. |
| 3361 __ Ldr(result, FieldMemOperand(result, Map::kPrototypeOffset)); | 3350 __ Ldr(result, FieldMemOperand(result, Map::kPrototypeOffset)); |
| 3362 | 3351 |
| 3363 // All done. | 3352 // All done. |
| 3364 __ Bind(&done); | 3353 __ Bind(&done); |
| 3365 } | 3354 } |
| 3366 | 3355 |
| 3367 | 3356 |
| 3368 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) { | 3357 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) { |
| 3369 Register result = ToRegister(instr->result()); | 3358 Register result = ToRegister(instr->result()); |
| 3370 __ Mov(result, Operand(Handle<Object>(instr->hydrogen()->cell().handle()))); | 3359 __ Mov(result, Operand(Handle<Object>(instr->hydrogen()->cell().handle()))); |
| 3371 __ Ldr(result, FieldMemOperand(result, Cell::kValueOffset)); | 3360 __ Ldr(result, FieldMemOperand(result, Cell::kValueOffset)); |
| 3372 if (instr->hydrogen()->RequiresHoleCheck()) { | 3361 if (instr->hydrogen()->RequiresHoleCheck()) { |
| 3373 DeoptimizeIfRoot( | 3362 DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr); |
| 3374 result, Heap::kTheHoleValueRootIndex, instr->environment()); | |
| 3375 } | 3363 } |
| 3376 } | 3364 } |
| 3377 | 3365 |
| 3378 | 3366 |
| 3379 template <class T> | 3367 template <class T> |
| 3380 void LCodeGen::EmitVectorLoadICRegisters(T* instr) { | 3368 void LCodeGen::EmitVectorLoadICRegisters(T* instr) { |
| 3381 DCHECK(FLAG_vector_ics); | 3369 DCHECK(FLAG_vector_ics); |
| 3382 Register vector = ToRegister(instr->temp_vector()); | 3370 Register vector = ToRegister(instr->temp_vector()); |
| 3383 DCHECK(vector.is(VectorLoadICDescriptor::VectorRegister())); | 3371 DCHECK(vector.is(VectorLoadICDescriptor::VectorRegister())); |
| 3384 __ Mov(vector, instr->hydrogen()->feedback_vector()); | 3372 __ Mov(vector, instr->hydrogen()->feedback_vector()); |
| (...skipping 110 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 3495 case EXTERNAL_INT32_ELEMENTS: | 3483 case EXTERNAL_INT32_ELEMENTS: |
| 3496 case INT32_ELEMENTS: | 3484 case INT32_ELEMENTS: |
| 3497 __ Ldrsw(result, mem_op); | 3485 __ Ldrsw(result, mem_op); |
| 3498 break; | 3486 break; |
| 3499 case EXTERNAL_UINT32_ELEMENTS: | 3487 case EXTERNAL_UINT32_ELEMENTS: |
| 3500 case UINT32_ELEMENTS: | 3488 case UINT32_ELEMENTS: |
| 3501 __ Ldr(result.W(), mem_op); | 3489 __ Ldr(result.W(), mem_op); |
| 3502 if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) { | 3490 if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) { |
| 3503 // Deopt if value > 0x80000000. | 3491 // Deopt if value > 0x80000000. |
| 3504 __ Tst(result, 0xFFFFFFFF80000000); | 3492 __ Tst(result, 0xFFFFFFFF80000000); |
| 3505 DeoptimizeIf(ne, instr->environment()); | 3493 DeoptimizeIf(ne, instr); |
| 3506 } | 3494 } |
| 3507 break; | 3495 break; |
| 3508 case FLOAT32_ELEMENTS: | 3496 case FLOAT32_ELEMENTS: |
| 3509 case FLOAT64_ELEMENTS: | 3497 case FLOAT64_ELEMENTS: |
| 3510 case EXTERNAL_FLOAT32_ELEMENTS: | 3498 case EXTERNAL_FLOAT32_ELEMENTS: |
| 3511 case EXTERNAL_FLOAT64_ELEMENTS: | 3499 case EXTERNAL_FLOAT64_ELEMENTS: |
| 3512 case FAST_HOLEY_DOUBLE_ELEMENTS: | 3500 case FAST_HOLEY_DOUBLE_ELEMENTS: |
| 3513 case FAST_HOLEY_ELEMENTS: | 3501 case FAST_HOLEY_ELEMENTS: |
| 3514 case FAST_HOLEY_SMI_ELEMENTS: | 3502 case FAST_HOLEY_SMI_ELEMENTS: |
| 3515 case FAST_DOUBLE_ELEMENTS: | 3503 case FAST_DOUBLE_ELEMENTS: |
| (...skipping 76 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 3592 | 3580 |
| 3593 __ Ldr(result, mem_op); | 3581 __ Ldr(result, mem_op); |
| 3594 | 3582 |
| 3595 if (instr->hydrogen()->RequiresHoleCheck()) { | 3583 if (instr->hydrogen()->RequiresHoleCheck()) { |
| 3596 Register scratch = ToRegister(instr->temp()); | 3584 Register scratch = ToRegister(instr->temp()); |
| 3597 // Detect the hole NaN by adding one to the integer representation of the | 3585 // Detect the hole NaN by adding one to the integer representation of the |
| 3598 // result, and checking for overflow. | 3586 // result, and checking for overflow. |
| 3599 STATIC_ASSERT(kHoleNanInt64 == 0x7fffffffffffffff); | 3587 STATIC_ASSERT(kHoleNanInt64 == 0x7fffffffffffffff); |
| 3600 __ Ldr(scratch, mem_op); | 3588 __ Ldr(scratch, mem_op); |
| 3601 __ Cmn(scratch, 1); | 3589 __ Cmn(scratch, 1); |
| 3602 DeoptimizeIf(vs, instr->environment()); | 3590 DeoptimizeIf(vs, instr); |
| 3603 } | 3591 } |
| 3604 } | 3592 } |
| 3605 | 3593 |
| 3606 | 3594 |
| 3607 void LCodeGen::DoLoadKeyedFixed(LLoadKeyedFixed* instr) { | 3595 void LCodeGen::DoLoadKeyedFixed(LLoadKeyedFixed* instr) { |
| 3608 Register elements = ToRegister(instr->elements()); | 3596 Register elements = ToRegister(instr->elements()); |
| 3609 Register result = ToRegister(instr->result()); | 3597 Register result = ToRegister(instr->result()); |
| 3610 MemOperand mem_op; | 3598 MemOperand mem_op; |
| 3611 | 3599 |
| 3612 Representation representation = instr->hydrogen()->representation(); | 3600 Representation representation = instr->hydrogen()->representation(); |
| (...skipping 17 matching lines...) Expand all Loading... |
| 3630 | 3618 |
| 3631 mem_op = PrepareKeyedArrayOperand(load_base, elements, key, key_is_tagged, | 3619 mem_op = PrepareKeyedArrayOperand(load_base, elements, key, key_is_tagged, |
| 3632 instr->hydrogen()->elements_kind(), | 3620 instr->hydrogen()->elements_kind(), |
| 3633 representation, instr->base_offset()); | 3621 representation, instr->base_offset()); |
| 3634 } | 3622 } |
| 3635 | 3623 |
| 3636 __ Load(result, mem_op, representation); | 3624 __ Load(result, mem_op, representation); |
| 3637 | 3625 |
| 3638 if (instr->hydrogen()->RequiresHoleCheck()) { | 3626 if (instr->hydrogen()->RequiresHoleCheck()) { |
| 3639 if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) { | 3627 if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) { |
| 3640 DeoptimizeIfNotSmi(result, instr->environment()); | 3628 DeoptimizeIfNotSmi(result, instr); |
| 3641 } else { | 3629 } else { |
| 3642 DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, | 3630 DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr); |
| 3643 instr->environment()); | |
| 3644 } | 3631 } |
| 3645 } | 3632 } |
| 3646 } | 3633 } |
| 3647 | 3634 |
| 3648 | 3635 |
| 3649 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) { | 3636 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) { |
| 3650 DCHECK(ToRegister(instr->context()).is(cp)); | 3637 DCHECK(ToRegister(instr->context()).is(cp)); |
| 3651 DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister())); | 3638 DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister())); |
| 3652 DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister())); | 3639 DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister())); |
| 3653 if (FLAG_vector_ics) { | 3640 if (FLAG_vector_ics) { |
| (...skipping 81 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 3735 if (r.IsDouble()) { | 3722 if (r.IsDouble()) { |
| 3736 DoubleRegister input = ToDoubleRegister(instr->value()); | 3723 DoubleRegister input = ToDoubleRegister(instr->value()); |
| 3737 DoubleRegister result = ToDoubleRegister(instr->result()); | 3724 DoubleRegister result = ToDoubleRegister(instr->result()); |
| 3738 __ Fabs(result, input); | 3725 __ Fabs(result, input); |
| 3739 } else if (r.IsSmi() || r.IsInteger32()) { | 3726 } else if (r.IsSmi() || r.IsInteger32()) { |
| 3740 Register input = r.IsSmi() ? ToRegister(instr->value()) | 3727 Register input = r.IsSmi() ? ToRegister(instr->value()) |
| 3741 : ToRegister32(instr->value()); | 3728 : ToRegister32(instr->value()); |
| 3742 Register result = r.IsSmi() ? ToRegister(instr->result()) | 3729 Register result = r.IsSmi() ? ToRegister(instr->result()) |
| 3743 : ToRegister32(instr->result()); | 3730 : ToRegister32(instr->result()); |
| 3744 __ Abs(result, input); | 3731 __ Abs(result, input); |
| 3745 DeoptimizeIf(vs, instr->environment()); | 3732 DeoptimizeIf(vs, instr); |
| 3746 } | 3733 } |
| 3747 } | 3734 } |
| 3748 | 3735 |
| 3749 | 3736 |
| 3750 void LCodeGen::DoDeferredMathAbsTagged(LMathAbsTagged* instr, | 3737 void LCodeGen::DoDeferredMathAbsTagged(LMathAbsTagged* instr, |
| 3751 Label* exit, | 3738 Label* exit, |
| 3752 Label* allocation_entry) { | 3739 Label* allocation_entry) { |
| 3753 // Handle the tricky cases of MathAbsTagged: | 3740 // Handle the tricky cases of MathAbsTagged: |
| 3754 // - HeapNumber inputs. | 3741 // - HeapNumber inputs. |
| 3755 // - Negative inputs produce a positive result, so a new HeapNumber is | 3742 // - Negative inputs produce a positive result, so a new HeapNumber is |
| 3756 // allocated to hold it. | 3743 // allocated to hold it. |
| 3757 // - Positive inputs are returned as-is, since there is no need to allocate | 3744 // - Positive inputs are returned as-is, since there is no need to allocate |
| 3758 // a new HeapNumber for the result. | 3745 // a new HeapNumber for the result. |
| 3759 // - The (smi) input -0x80000000, produces +0x80000000, which does not fit | 3746 // - The (smi) input -0x80000000, produces +0x80000000, which does not fit |
| 3760 // a smi. In this case, the inline code sets the result and jumps directly | 3747 // a smi. In this case, the inline code sets the result and jumps directly |
| 3761 // to the allocation_entry label. | 3748 // to the allocation_entry label. |
| 3762 DCHECK(instr->context() != NULL); | 3749 DCHECK(instr->context() != NULL); |
| 3763 DCHECK(ToRegister(instr->context()).is(cp)); | 3750 DCHECK(ToRegister(instr->context()).is(cp)); |
| 3764 Register input = ToRegister(instr->value()); | 3751 Register input = ToRegister(instr->value()); |
| 3765 Register temp1 = ToRegister(instr->temp1()); | 3752 Register temp1 = ToRegister(instr->temp1()); |
| 3766 Register temp2 = ToRegister(instr->temp2()); | 3753 Register temp2 = ToRegister(instr->temp2()); |
| 3767 Register result_bits = ToRegister(instr->temp3()); | 3754 Register result_bits = ToRegister(instr->temp3()); |
| 3768 Register result = ToRegister(instr->result()); | 3755 Register result = ToRegister(instr->result()); |
| 3769 | 3756 |
| 3770 Label runtime_allocation; | 3757 Label runtime_allocation; |
| 3771 | 3758 |
| 3772 // Deoptimize if the input is not a HeapNumber. | 3759 // Deoptimize if the input is not a HeapNumber. |
| 3773 __ Ldr(temp1, FieldMemOperand(input, HeapObject::kMapOffset)); | 3760 __ Ldr(temp1, FieldMemOperand(input, HeapObject::kMapOffset)); |
| 3774 DeoptimizeIfNotRoot(temp1, Heap::kHeapNumberMapRootIndex, | 3761 DeoptimizeIfNotRoot(temp1, Heap::kHeapNumberMapRootIndex, instr); |
| 3775 instr->environment()); | |
| 3776 | 3762 |
| 3777 // If the argument is positive, we can return it as-is, without any need to | 3763 // If the argument is positive, we can return it as-is, without any need to |
| 3778 // allocate a new HeapNumber for the result. We have to do this in integer | 3764 // allocate a new HeapNumber for the result. We have to do this in integer |
| 3779 // registers (rather than with fabs) because we need to be able to distinguish | 3765 // registers (rather than with fabs) because we need to be able to distinguish |
| 3780 // the two zeroes. | 3766 // the two zeroes. |
| 3781 __ Ldr(result_bits, FieldMemOperand(input, HeapNumber::kValueOffset)); | 3767 __ Ldr(result_bits, FieldMemOperand(input, HeapNumber::kValueOffset)); |
| 3782 __ Mov(result, input); | 3768 __ Mov(result, input); |
| 3783 __ Tbz(result_bits, kXSignBit, exit); | 3769 __ Tbz(result_bits, kXSignBit, exit); |
| 3784 | 3770 |
| 3785 // Calculate abs(input) by clearing the sign bit. | 3771 // Calculate abs(input) by clearing the sign bit. |
| (...skipping 103 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 3889 | 3875 |
| 3890 __ Frintm(result, input); | 3876 __ Frintm(result, input); |
| 3891 } | 3877 } |
| 3892 | 3878 |
| 3893 | 3879 |
| 3894 void LCodeGen::DoMathFloorI(LMathFloorI* instr) { | 3880 void LCodeGen::DoMathFloorI(LMathFloorI* instr) { |
| 3895 DoubleRegister input = ToDoubleRegister(instr->value()); | 3881 DoubleRegister input = ToDoubleRegister(instr->value()); |
| 3896 Register result = ToRegister(instr->result()); | 3882 Register result = ToRegister(instr->result()); |
| 3897 | 3883 |
| 3898 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 3884 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 3899 DeoptimizeIfMinusZero(input, instr->environment()); | 3885 DeoptimizeIfMinusZero(input, instr); |
| 3900 } | 3886 } |
| 3901 | 3887 |
| 3902 __ Fcvtms(result, input); | 3888 __ Fcvtms(result, input); |
| 3903 | 3889 |
| 3904 // Check that the result fits into a 32-bit integer. | 3890 // Check that the result fits into a 32-bit integer. |
| 3905 // - The result did not overflow. | 3891 // - The result did not overflow. |
| 3906 __ Cmp(result, Operand(result, SXTW)); | 3892 __ Cmp(result, Operand(result, SXTW)); |
| 3907 // - The input was not NaN. | 3893 // - The input was not NaN. |
| 3908 __ Fccmp(input, input, NoFlag, eq); | 3894 __ Fccmp(input, input, NoFlag, eq); |
| 3909 DeoptimizeIf(ne, instr->environment()); | 3895 DeoptimizeIf(ne, instr); |
| 3910 } | 3896 } |
| 3911 | 3897 |
| 3912 | 3898 |
| 3913 void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) { | 3899 void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) { |
| 3914 Register dividend = ToRegister32(instr->dividend()); | 3900 Register dividend = ToRegister32(instr->dividend()); |
| 3915 Register result = ToRegister32(instr->result()); | 3901 Register result = ToRegister32(instr->result()); |
| 3916 int32_t divisor = instr->divisor(); | 3902 int32_t divisor = instr->divisor(); |
| 3917 | 3903 |
| 3918 // If the divisor is 1, return the dividend. | 3904 // If the divisor is 1, return the dividend. |
| 3919 if (divisor == 1) { | 3905 if (divisor == 1) { |
| 3920 __ Mov(result, dividend, kDiscardForSameWReg); | 3906 __ Mov(result, dividend, kDiscardForSameWReg); |
| 3921 return; | 3907 return; |
| 3922 } | 3908 } |
| 3923 | 3909 |
| 3924 // If the divisor is positive, things are easy: There can be no deopts and we | 3910 // If the divisor is positive, things are easy: There can be no deopts and we |
| 3925 // can simply do an arithmetic right shift. | 3911 // can simply do an arithmetic right shift. |
| 3926 int32_t shift = WhichPowerOf2Abs(divisor); | 3912 int32_t shift = WhichPowerOf2Abs(divisor); |
| 3927 if (divisor > 1) { | 3913 if (divisor > 1) { |
| 3928 __ Mov(result, Operand(dividend, ASR, shift)); | 3914 __ Mov(result, Operand(dividend, ASR, shift)); |
| 3929 return; | 3915 return; |
| 3930 } | 3916 } |
| 3931 | 3917 |
| 3932 // If the divisor is negative, we have to negate and handle edge cases. | 3918 // If the divisor is negative, we have to negate and handle edge cases. |
| 3933 __ Negs(result, dividend); | 3919 __ Negs(result, dividend); |
| 3934 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 3920 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 3935 DeoptimizeIf(eq, instr->environment()); | 3921 DeoptimizeIf(eq, instr); |
| 3936 } | 3922 } |
| 3937 | 3923 |
| 3938 // Dividing by -1 is basically negation, unless we overflow. | 3924 // Dividing by -1 is basically negation, unless we overflow. |
| 3939 if (divisor == -1) { | 3925 if (divisor == -1) { |
| 3940 if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { | 3926 if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { |
| 3941 DeoptimizeIf(vs, instr->environment()); | 3927 DeoptimizeIf(vs, instr); |
| 3942 } | 3928 } |
| 3943 return; | 3929 return; |
| 3944 } | 3930 } |
| 3945 | 3931 |
| 3946 // If the negation could not overflow, simply shifting is OK. | 3932 // If the negation could not overflow, simply shifting is OK. |
| 3947 if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { | 3933 if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { |
| 3948 __ Mov(result, Operand(dividend, ASR, shift)); | 3934 __ Mov(result, Operand(dividend, ASR, shift)); |
| 3949 return; | 3935 return; |
| 3950 } | 3936 } |
| 3951 | 3937 |
| 3952 __ Asr(result, result, shift); | 3938 __ Asr(result, result, shift); |
| 3953 __ Csel(result, result, kMinInt / divisor, vc); | 3939 __ Csel(result, result, kMinInt / divisor, vc); |
| 3954 } | 3940 } |
| 3955 | 3941 |
| 3956 | 3942 |
| 3957 void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) { | 3943 void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) { |
| 3958 Register dividend = ToRegister32(instr->dividend()); | 3944 Register dividend = ToRegister32(instr->dividend()); |
| 3959 int32_t divisor = instr->divisor(); | 3945 int32_t divisor = instr->divisor(); |
| 3960 Register result = ToRegister32(instr->result()); | 3946 Register result = ToRegister32(instr->result()); |
| 3961 DCHECK(!AreAliased(dividend, result)); | 3947 DCHECK(!AreAliased(dividend, result)); |
| 3962 | 3948 |
| 3963 if (divisor == 0) { | 3949 if (divisor == 0) { |
| 3964 Deoptimize(instr->environment()); | 3950 Deoptimize(instr); |
| 3965 return; | 3951 return; |
| 3966 } | 3952 } |
| 3967 | 3953 |
| 3968 // Check for (0 / -x) that will produce negative zero. | 3954 // Check for (0 / -x) that will produce negative zero. |
| 3969 HMathFloorOfDiv* hdiv = instr->hydrogen(); | 3955 HMathFloorOfDiv* hdiv = instr->hydrogen(); |
| 3970 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { | 3956 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { |
| 3971 DeoptimizeIfZero(dividend, instr->environment()); | 3957 DeoptimizeIfZero(dividend, instr); |
| 3972 } | 3958 } |
| 3973 | 3959 |
| 3974 // Easy case: We need no dynamic check for the dividend and the flooring | 3960 // Easy case: We need no dynamic check for the dividend and the flooring |
| 3975 // division is the same as the truncating division. | 3961 // division is the same as the truncating division. |
| 3976 if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) || | 3962 if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) || |
| 3977 (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) { | 3963 (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) { |
| 3978 __ TruncatingDiv(result, dividend, Abs(divisor)); | 3964 __ TruncatingDiv(result, dividend, Abs(divisor)); |
| 3979 if (divisor < 0) __ Neg(result, result); | 3965 if (divisor < 0) __ Neg(result, result); |
| 3980 return; | 3966 return; |
| 3981 } | 3967 } |
| (...skipping 22 matching lines...) Expand all Loading... |
| 4004 Register dividend = ToRegister32(instr->dividend()); | 3990 Register dividend = ToRegister32(instr->dividend()); |
| 4005 Register divisor = ToRegister32(instr->divisor()); | 3991 Register divisor = ToRegister32(instr->divisor()); |
| 4006 Register remainder = ToRegister32(instr->temp()); | 3992 Register remainder = ToRegister32(instr->temp()); |
| 4007 Register result = ToRegister32(instr->result()); | 3993 Register result = ToRegister32(instr->result()); |
| 4008 | 3994 |
| 4009 // This can't cause an exception on ARM, so we can speculatively | 3995 // This can't cause an exception on ARM, so we can speculatively |
| 4010 // execute it now. | 3996 // execute it now. |
| 4011 __ Sdiv(result, dividend, divisor); | 3997 __ Sdiv(result, dividend, divisor); |
| 4012 | 3998 |
| 4013 // Check for x / 0. | 3999 // Check for x / 0. |
| 4014 DeoptimizeIfZero(divisor, instr->environment()); | 4000 DeoptimizeIfZero(divisor, instr); |
| 4015 | 4001 |
| 4016 // Check for (kMinInt / -1). | 4002 // Check for (kMinInt / -1). |
| 4017 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) { | 4003 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) { |
| 4018 // The V flag will be set iff dividend == kMinInt. | 4004 // The V flag will be set iff dividend == kMinInt. |
| 4019 __ Cmp(dividend, 1); | 4005 __ Cmp(dividend, 1); |
| 4020 __ Ccmp(divisor, -1, NoFlag, vs); | 4006 __ Ccmp(divisor, -1, NoFlag, vs); |
| 4021 DeoptimizeIf(eq, instr->environment()); | 4007 DeoptimizeIf(eq, instr); |
| 4022 } | 4008 } |
| 4023 | 4009 |
| 4024 // Check for (0 / -x) that will produce negative zero. | 4010 // Check for (0 / -x) that will produce negative zero. |
| 4025 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 4011 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 4026 __ Cmp(divisor, 0); | 4012 __ Cmp(divisor, 0); |
| 4027 __ Ccmp(dividend, 0, ZFlag, mi); | 4013 __ Ccmp(dividend, 0, ZFlag, mi); |
| 4028 // "divisor" can't be zero because the code would have already been | 4014 // "divisor" can't be zero because the code would have already been |
| 4029 // deoptimized. The Z flag is set only if (divisor < 0) and (dividend == 0). | 4015 // deoptimized. The Z flag is set only if (divisor < 0) and (dividend == 0). |
| 4030 // In this case we need to deoptimize to produce a -0. | 4016 // In this case we need to deoptimize to produce a -0. |
| 4031 DeoptimizeIf(eq, instr->environment()); | 4017 DeoptimizeIf(eq, instr); |
| 4032 } | 4018 } |
| 4033 | 4019 |
| 4034 Label done; | 4020 Label done; |
| 4035 // If both operands have the same sign then we are done. | 4021 // If both operands have the same sign then we are done. |
| 4036 __ Eor(remainder, dividend, divisor); | 4022 __ Eor(remainder, dividend, divisor); |
| 4037 __ Tbz(remainder, kWSignBit, &done); | 4023 __ Tbz(remainder, kWSignBit, &done); |
| 4038 | 4024 |
| 4039 // Check if the result needs to be corrected. | 4025 // Check if the result needs to be corrected. |
| 4040 __ Msub(remainder, result, divisor, dividend); | 4026 __ Msub(remainder, result, divisor, dividend); |
| 4041 __ Cbz(remainder, &done); | 4027 __ Cbz(remainder, &done); |
| (...skipping 59 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 4101 DCHECK(ToDoubleRegister(instr->result()).is(d0)); | 4087 DCHECK(ToDoubleRegister(instr->result()).is(d0)); |
| 4102 | 4088 |
| 4103 if (exponent_type.IsSmi()) { | 4089 if (exponent_type.IsSmi()) { |
| 4104 MathPowStub stub(isolate(), MathPowStub::TAGGED); | 4090 MathPowStub stub(isolate(), MathPowStub::TAGGED); |
| 4105 __ CallStub(&stub); | 4091 __ CallStub(&stub); |
| 4106 } else if (exponent_type.IsTagged()) { | 4092 } else if (exponent_type.IsTagged()) { |
| 4107 Label no_deopt; | 4093 Label no_deopt; |
| 4108 __ JumpIfSmi(tagged_exponent, &no_deopt); | 4094 __ JumpIfSmi(tagged_exponent, &no_deopt); |
| 4109 DCHECK(!x0.is(tagged_exponent)); | 4095 DCHECK(!x0.is(tagged_exponent)); |
| 4110 __ Ldr(x0, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset)); | 4096 __ Ldr(x0, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset)); |
| 4111 DeoptimizeIfNotRoot(x0, Heap::kHeapNumberMapRootIndex, | 4097 DeoptimizeIfNotRoot(x0, Heap::kHeapNumberMapRootIndex, instr); |
| 4112 instr->environment()); | |
| 4113 __ Bind(&no_deopt); | 4098 __ Bind(&no_deopt); |
| 4114 MathPowStub stub(isolate(), MathPowStub::TAGGED); | 4099 MathPowStub stub(isolate(), MathPowStub::TAGGED); |
| 4115 __ CallStub(&stub); | 4100 __ CallStub(&stub); |
| 4116 } else if (exponent_type.IsInteger32()) { | 4101 } else if (exponent_type.IsInteger32()) { |
| 4117 // Ensure integer exponent has no garbage in top 32-bits, as MathPowStub | 4102 // Ensure integer exponent has no garbage in top 32-bits, as MathPowStub |
| 4118 // supports large integer exponents. | 4103 // supports large integer exponents. |
| 4119 __ Sxtw(integer_exponent, integer_exponent); | 4104 __ Sxtw(integer_exponent, integer_exponent); |
| 4120 MathPowStub stub(isolate(), MathPowStub::INTEGER); | 4105 MathPowStub stub(isolate(), MathPowStub::INTEGER); |
| 4121 __ CallStub(&stub); | 4106 __ CallStub(&stub); |
| 4122 } else { | 4107 } else { |
| (...skipping 60 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 4183 // result fits in 32 bits. | 4168 // result fits in 32 bits. |
| 4184 __ Cmp(result, Operand(result.W(), SXTW)); | 4169 __ Cmp(result, Operand(result.W(), SXTW)); |
| 4185 __ Ccmp(result, 1, ZFlag, eq); | 4170 __ Ccmp(result, 1, ZFlag, eq); |
| 4186 __ B(hi, &done); | 4171 __ B(hi, &done); |
| 4187 | 4172 |
| 4188 // At this point, we have to handle possible inputs of NaN or numbers in the | 4173 // At this point, we have to handle possible inputs of NaN or numbers in the |
| 4189 // range [-0.5, 1.5[, or numbers larger than 32 bits. | 4174 // range [-0.5, 1.5[, or numbers larger than 32 bits. |
| 4190 | 4175 |
| 4191 // Deoptimize if the result > 1, as it must be larger than 32 bits. | 4176 // Deoptimize if the result > 1, as it must be larger than 32 bits. |
| 4192 __ Cmp(result, 1); | 4177 __ Cmp(result, 1); |
| 4193 DeoptimizeIf(hi, instr->environment()); | 4178 DeoptimizeIf(hi, instr); |
| 4194 | 4179 |
| 4195 // Deoptimize for negative inputs, which at this point are only numbers in | 4180 // Deoptimize for negative inputs, which at this point are only numbers in |
| 4196 // the range [-0.5, -0.0] | 4181 // the range [-0.5, -0.0] |
| 4197 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 4182 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 4198 __ Fmov(result, input); | 4183 __ Fmov(result, input); |
| 4199 DeoptimizeIfNegative(result, instr->environment()); | 4184 DeoptimizeIfNegative(result, instr); |
| 4200 } | 4185 } |
| 4201 | 4186 |
| 4202 // Deoptimize if the input was NaN. | 4187 // Deoptimize if the input was NaN. |
| 4203 __ Fcmp(input, dot_five); | 4188 __ Fcmp(input, dot_five); |
| 4204 DeoptimizeIf(vs, instr->environment()); | 4189 DeoptimizeIf(vs, instr); |
| 4205 | 4190 |
| 4206 // Now, the only unhandled inputs are in the range [0.0, 1.5[ (or [-0.5, 1.5[ | 4191 // Now, the only unhandled inputs are in the range [0.0, 1.5[ (or [-0.5, 1.5[ |
| 4207 // if we didn't generate a -0.0 bailout). If input >= 0.5 then return 1, | 4192 // if we didn't generate a -0.0 bailout). If input >= 0.5 then return 1, |
| 4208 // else 0; we avoid dealing with 0.499...94 directly. | 4193 // else 0; we avoid dealing with 0.499...94 directly. |
| 4209 __ Cset(result, ge); | 4194 __ Cset(result, ge); |
| 4210 __ Bind(&done); | 4195 __ Bind(&done); |
| 4211 } | 4196 } |
| 4212 | 4197 |
| 4213 | 4198 |
| 4214 void LCodeGen::DoMathFround(LMathFround* instr) { | 4199 void LCodeGen::DoMathFround(LMathFround* instr) { |
| (...skipping 57 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 4272 HMod* hmod = instr->hydrogen(); | 4257 HMod* hmod = instr->hydrogen(); |
| 4273 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1); | 4258 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1); |
| 4274 Label dividend_is_not_negative, done; | 4259 Label dividend_is_not_negative, done; |
| 4275 if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) { | 4260 if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) { |
| 4276 __ Tbz(dividend, kWSignBit, &dividend_is_not_negative); | 4261 __ Tbz(dividend, kWSignBit, &dividend_is_not_negative); |
| 4277 // Note that this is correct even for kMinInt operands. | 4262 // Note that this is correct even for kMinInt operands. |
| 4278 __ Neg(dividend, dividend); | 4263 __ Neg(dividend, dividend); |
| 4279 __ And(dividend, dividend, mask); | 4264 __ And(dividend, dividend, mask); |
| 4280 __ Negs(dividend, dividend); | 4265 __ Negs(dividend, dividend); |
| 4281 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { | 4266 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 4282 DeoptimizeIf(eq, instr->environment()); | 4267 DeoptimizeIf(eq, instr); |
| 4283 } | 4268 } |
| 4284 __ B(&done); | 4269 __ B(&done); |
| 4285 } | 4270 } |
| 4286 | 4271 |
| 4287 __ bind(&dividend_is_not_negative); | 4272 __ bind(&dividend_is_not_negative); |
| 4288 __ And(dividend, dividend, mask); | 4273 __ And(dividend, dividend, mask); |
| 4289 __ bind(&done); | 4274 __ bind(&done); |
| 4290 } | 4275 } |
| 4291 | 4276 |
| 4292 | 4277 |
| 4293 void LCodeGen::DoModByConstI(LModByConstI* instr) { | 4278 void LCodeGen::DoModByConstI(LModByConstI* instr) { |
| 4294 Register dividend = ToRegister32(instr->dividend()); | 4279 Register dividend = ToRegister32(instr->dividend()); |
| 4295 int32_t divisor = instr->divisor(); | 4280 int32_t divisor = instr->divisor(); |
| 4296 Register result = ToRegister32(instr->result()); | 4281 Register result = ToRegister32(instr->result()); |
| 4297 Register temp = ToRegister32(instr->temp()); | 4282 Register temp = ToRegister32(instr->temp()); |
| 4298 DCHECK(!AreAliased(dividend, result, temp)); | 4283 DCHECK(!AreAliased(dividend, result, temp)); |
| 4299 | 4284 |
| 4300 if (divisor == 0) { | 4285 if (divisor == 0) { |
| 4301 Deoptimize(instr->environment()); | 4286 Deoptimize(instr); |
| 4302 return; | 4287 return; |
| 4303 } | 4288 } |
| 4304 | 4289 |
| 4305 __ TruncatingDiv(result, dividend, Abs(divisor)); | 4290 __ TruncatingDiv(result, dividend, Abs(divisor)); |
| 4306 __ Sxtw(dividend.X(), dividend); | 4291 __ Sxtw(dividend.X(), dividend); |
| 4307 __ Mov(temp, Abs(divisor)); | 4292 __ Mov(temp, Abs(divisor)); |
| 4308 __ Smsubl(result.X(), result, temp, dividend.X()); | 4293 __ Smsubl(result.X(), result, temp, dividend.X()); |
| 4309 | 4294 |
| 4310 // Check for negative zero. | 4295 // Check for negative zero. |
| 4311 HMod* hmod = instr->hydrogen(); | 4296 HMod* hmod = instr->hydrogen(); |
| 4312 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { | 4297 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 4313 Label remainder_not_zero; | 4298 Label remainder_not_zero; |
| 4314 __ Cbnz(result, &remainder_not_zero); | 4299 __ Cbnz(result, &remainder_not_zero); |
| 4315 DeoptimizeIfNegative(dividend, instr->environment()); | 4300 DeoptimizeIfNegative(dividend, instr); |
| 4316 __ bind(&remainder_not_zero); | 4301 __ bind(&remainder_not_zero); |
| 4317 } | 4302 } |
| 4318 } | 4303 } |
| 4319 | 4304 |
| 4320 | 4305 |
| 4321 void LCodeGen::DoModI(LModI* instr) { | 4306 void LCodeGen::DoModI(LModI* instr) { |
| 4322 Register dividend = ToRegister32(instr->left()); | 4307 Register dividend = ToRegister32(instr->left()); |
| 4323 Register divisor = ToRegister32(instr->right()); | 4308 Register divisor = ToRegister32(instr->right()); |
| 4324 Register result = ToRegister32(instr->result()); | 4309 Register result = ToRegister32(instr->result()); |
| 4325 | 4310 |
| 4326 Label done; | 4311 Label done; |
| 4327 // modulo = dividend - quotient * divisor | 4312 // modulo = dividend - quotient * divisor |
| 4328 __ Sdiv(result, dividend, divisor); | 4313 __ Sdiv(result, dividend, divisor); |
| 4329 if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) { | 4314 if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) { |
| 4330 DeoptimizeIfZero(divisor, instr->environment()); | 4315 DeoptimizeIfZero(divisor, instr); |
| 4331 } | 4316 } |
| 4332 __ Msub(result, result, divisor, dividend); | 4317 __ Msub(result, result, divisor, dividend); |
| 4333 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 4318 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 4334 __ Cbnz(result, &done); | 4319 __ Cbnz(result, &done); |
| 4335 DeoptimizeIfNegative(dividend, instr->environment()); | 4320 DeoptimizeIfNegative(dividend, instr); |
| 4336 } | 4321 } |
| 4337 __ Bind(&done); | 4322 __ Bind(&done); |
| 4338 } | 4323 } |
| 4339 | 4324 |
| 4340 | 4325 |
| 4341 void LCodeGen::DoMulConstIS(LMulConstIS* instr) { | 4326 void LCodeGen::DoMulConstIS(LMulConstIS* instr) { |
| 4342 DCHECK(instr->hydrogen()->representation().IsSmiOrInteger32()); | 4327 DCHECK(instr->hydrogen()->representation().IsSmiOrInteger32()); |
| 4343 bool is_smi = instr->hydrogen()->representation().IsSmi(); | 4328 bool is_smi = instr->hydrogen()->representation().IsSmi(); |
| 4344 Register result = | 4329 Register result = |
| 4345 is_smi ? ToRegister(instr->result()) : ToRegister32(instr->result()); | 4330 is_smi ? ToRegister(instr->result()) : ToRegister32(instr->result()); |
| 4346 Register left = | 4331 Register left = |
| 4347 is_smi ? ToRegister(instr->left()) : ToRegister32(instr->left()) ; | 4332 is_smi ? ToRegister(instr->left()) : ToRegister32(instr->left()) ; |
| 4348 int32_t right = ToInteger32(instr->right()); | 4333 int32_t right = ToInteger32(instr->right()); |
| 4349 DCHECK((right > -kMaxInt) || (right < kMaxInt)); | 4334 DCHECK((right > -kMaxInt) || (right < kMaxInt)); |
| 4350 | 4335 |
| 4351 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); | 4336 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); |
| 4352 bool bailout_on_minus_zero = | 4337 bool bailout_on_minus_zero = |
| 4353 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero); | 4338 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero); |
| 4354 | 4339 |
| 4355 if (bailout_on_minus_zero) { | 4340 if (bailout_on_minus_zero) { |
| 4356 if (right < 0) { | 4341 if (right < 0) { |
| 4357 // The result is -0 if right is negative and left is zero. | 4342 // The result is -0 if right is negative and left is zero. |
| 4358 DeoptimizeIfZero(left, instr->environment()); | 4343 DeoptimizeIfZero(left, instr); |
| 4359 } else if (right == 0) { | 4344 } else if (right == 0) { |
| 4360 // The result is -0 if the right is zero and the left is negative. | 4345 // The result is -0 if the right is zero and the left is negative. |
| 4361 DeoptimizeIfNegative(left, instr->environment()); | 4346 DeoptimizeIfNegative(left, instr); |
| 4362 } | 4347 } |
| 4363 } | 4348 } |
| 4364 | 4349 |
| 4365 switch (right) { | 4350 switch (right) { |
| 4366 // Cases which can detect overflow. | 4351 // Cases which can detect overflow. |
| 4367 case -1: | 4352 case -1: |
| 4368 if (can_overflow) { | 4353 if (can_overflow) { |
| 4369 // Only 0x80000000 can overflow here. | 4354 // Only 0x80000000 can overflow here. |
| 4370 __ Negs(result, left); | 4355 __ Negs(result, left); |
| 4371 DeoptimizeIf(vs, instr->environment()); | 4356 DeoptimizeIf(vs, instr); |
| 4372 } else { | 4357 } else { |
| 4373 __ Neg(result, left); | 4358 __ Neg(result, left); |
| 4374 } | 4359 } |
| 4375 break; | 4360 break; |
| 4376 case 0: | 4361 case 0: |
| 4377 // This case can never overflow. | 4362 // This case can never overflow. |
| 4378 __ Mov(result, 0); | 4363 __ Mov(result, 0); |
| 4379 break; | 4364 break; |
| 4380 case 1: | 4365 case 1: |
| 4381 // This case can never overflow. | 4366 // This case can never overflow. |
| 4382 __ Mov(result, left, kDiscardForSameWReg); | 4367 __ Mov(result, left, kDiscardForSameWReg); |
| 4383 break; | 4368 break; |
| 4384 case 2: | 4369 case 2: |
| 4385 if (can_overflow) { | 4370 if (can_overflow) { |
| 4386 __ Adds(result, left, left); | 4371 __ Adds(result, left, left); |
| 4387 DeoptimizeIf(vs, instr->environment()); | 4372 DeoptimizeIf(vs, instr); |
| 4388 } else { | 4373 } else { |
| 4389 __ Add(result, left, left); | 4374 __ Add(result, left, left); |
| 4390 } | 4375 } |
| 4391 break; | 4376 break; |
| 4392 | 4377 |
| 4393 default: | 4378 default: |
| 4394 // Multiplication by constant powers of two (and some related values) | 4379 // Multiplication by constant powers of two (and some related values) |
| 4395 // can be done efficiently with shifted operands. | 4380 // can be done efficiently with shifted operands. |
| 4396 int32_t right_abs = Abs(right); | 4381 int32_t right_abs = Abs(right); |
| 4397 | 4382 |
| 4398 if (base::bits::IsPowerOfTwo32(right_abs)) { | 4383 if (base::bits::IsPowerOfTwo32(right_abs)) { |
| 4399 int right_log2 = WhichPowerOf2(right_abs); | 4384 int right_log2 = WhichPowerOf2(right_abs); |
| 4400 | 4385 |
| 4401 if (can_overflow) { | 4386 if (can_overflow) { |
| 4402 Register scratch = result; | 4387 Register scratch = result; |
| 4403 DCHECK(!AreAliased(scratch, left)); | 4388 DCHECK(!AreAliased(scratch, left)); |
| 4404 __ Cls(scratch, left); | 4389 __ Cls(scratch, left); |
| 4405 __ Cmp(scratch, right_log2); | 4390 __ Cmp(scratch, right_log2); |
| 4406 DeoptimizeIf(lt, instr->environment()); | 4391 DeoptimizeIf(lt, instr); |
| 4407 } | 4392 } |
| 4408 | 4393 |
| 4409 if (right >= 0) { | 4394 if (right >= 0) { |
| 4410 // result = left << log2(right) | 4395 // result = left << log2(right) |
| 4411 __ Lsl(result, left, right_log2); | 4396 __ Lsl(result, left, right_log2); |
| 4412 } else { | 4397 } else { |
| 4413 // result = -left << log2(-right) | 4398 // result = -left << log2(-right) |
| 4414 if (can_overflow) { | 4399 if (can_overflow) { |
| 4415 __ Negs(result, Operand(left, LSL, right_log2)); | 4400 __ Negs(result, Operand(left, LSL, right_log2)); |
| 4416 DeoptimizeIf(vs, instr->environment()); | 4401 DeoptimizeIf(vs, instr); |
| 4417 } else { | 4402 } else { |
| 4418 __ Neg(result, Operand(left, LSL, right_log2)); | 4403 __ Neg(result, Operand(left, LSL, right_log2)); |
| 4419 } | 4404 } |
| 4420 } | 4405 } |
| 4421 return; | 4406 return; |
| 4422 } | 4407 } |
| 4423 | 4408 |
| 4424 | 4409 |
| 4425 // For the following cases, we could perform a conservative overflow check | 4410 // For the following cases, we could perform a conservative overflow check |
| 4426 // with CLS as above. However the few cycles saved are likely not worth | 4411 // with CLS as above. However the few cycles saved are likely not worth |
| (...skipping 37 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 4464 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero); | 4449 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero); |
| 4465 | 4450 |
| 4466 if (bailout_on_minus_zero && !left.Is(right)) { | 4451 if (bailout_on_minus_zero && !left.Is(right)) { |
| 4467 // If one operand is zero and the other is negative, the result is -0. | 4452 // If one operand is zero and the other is negative, the result is -0. |
| 4468 // - Set Z (eq) if either left or right, or both, are 0. | 4453 // - Set Z (eq) if either left or right, or both, are 0. |
| 4469 __ Cmp(left, 0); | 4454 __ Cmp(left, 0); |
| 4470 __ Ccmp(right, 0, ZFlag, ne); | 4455 __ Ccmp(right, 0, ZFlag, ne); |
| 4471 // - If so (eq), set N (mi) if left + right is negative. | 4456 // - If so (eq), set N (mi) if left + right is negative. |
| 4472 // - Otherwise, clear N. | 4457 // - Otherwise, clear N. |
| 4473 __ Ccmn(left, right, NoFlag, eq); | 4458 __ Ccmn(left, right, NoFlag, eq); |
| 4474 DeoptimizeIf(mi, instr->environment()); | 4459 DeoptimizeIf(mi, instr); |
| 4475 } | 4460 } |
| 4476 | 4461 |
| 4477 if (can_overflow) { | 4462 if (can_overflow) { |
| 4478 __ Smull(result.X(), left, right); | 4463 __ Smull(result.X(), left, right); |
| 4479 __ Cmp(result.X(), Operand(result, SXTW)); | 4464 __ Cmp(result.X(), Operand(result, SXTW)); |
| 4480 DeoptimizeIf(ne, instr->environment()); | 4465 DeoptimizeIf(ne, instr); |
| 4481 } else { | 4466 } else { |
| 4482 __ Mul(result, left, right); | 4467 __ Mul(result, left, right); |
| 4483 } | 4468 } |
| 4484 } | 4469 } |
| 4485 | 4470 |
| 4486 | 4471 |
| 4487 void LCodeGen::DoMulS(LMulS* instr) { | 4472 void LCodeGen::DoMulS(LMulS* instr) { |
| 4488 Register result = ToRegister(instr->result()); | 4473 Register result = ToRegister(instr->result()); |
| 4489 Register left = ToRegister(instr->left()); | 4474 Register left = ToRegister(instr->left()); |
| 4490 Register right = ToRegister(instr->right()); | 4475 Register right = ToRegister(instr->right()); |
| 4491 | 4476 |
| 4492 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); | 4477 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); |
| 4493 bool bailout_on_minus_zero = | 4478 bool bailout_on_minus_zero = |
| 4494 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero); | 4479 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero); |
| 4495 | 4480 |
| 4496 if (bailout_on_minus_zero && !left.Is(right)) { | 4481 if (bailout_on_minus_zero && !left.Is(right)) { |
| 4497 // If one operand is zero and the other is negative, the result is -0. | 4482 // If one operand is zero and the other is negative, the result is -0. |
| 4498 // - Set Z (eq) if either left or right, or both, are 0. | 4483 // - Set Z (eq) if either left or right, or both, are 0. |
| 4499 __ Cmp(left, 0); | 4484 __ Cmp(left, 0); |
| 4500 __ Ccmp(right, 0, ZFlag, ne); | 4485 __ Ccmp(right, 0, ZFlag, ne); |
| 4501 // - If so (eq), set N (mi) if left + right is negative. | 4486 // - If so (eq), set N (mi) if left + right is negative. |
| 4502 // - Otherwise, clear N. | 4487 // - Otherwise, clear N. |
| 4503 __ Ccmn(left, right, NoFlag, eq); | 4488 __ Ccmn(left, right, NoFlag, eq); |
| 4504 DeoptimizeIf(mi, instr->environment()); | 4489 DeoptimizeIf(mi, instr); |
| 4505 } | 4490 } |
| 4506 | 4491 |
| 4507 STATIC_ASSERT((kSmiShift == 32) && (kSmiTag == 0)); | 4492 STATIC_ASSERT((kSmiShift == 32) && (kSmiTag == 0)); |
| 4508 if (can_overflow) { | 4493 if (can_overflow) { |
| 4509 __ Smulh(result, left, right); | 4494 __ Smulh(result, left, right); |
| 4510 __ Cmp(result, Operand(result.W(), SXTW)); | 4495 __ Cmp(result, Operand(result.W(), SXTW)); |
| 4511 __ SmiTag(result); | 4496 __ SmiTag(result); |
| 4512 DeoptimizeIf(ne, instr->environment()); | 4497 DeoptimizeIf(ne, instr); |
| 4513 } else { | 4498 } else { |
| 4514 if (AreAliased(result, left, right)) { | 4499 if (AreAliased(result, left, right)) { |
| 4515 // All three registers are the same: half untag the input and then | 4500 // All three registers are the same: half untag the input and then |
| 4516 // multiply, giving a tagged result. | 4501 // multiply, giving a tagged result. |
| 4517 STATIC_ASSERT((kSmiShift % 2) == 0); | 4502 STATIC_ASSERT((kSmiShift % 2) == 0); |
| 4518 __ Asr(result, left, kSmiShift / 2); | 4503 __ Asr(result, left, kSmiShift / 2); |
| 4519 __ Mul(result, result, result); | 4504 __ Mul(result, result, result); |
| 4520 } else if (result.Is(left) && !left.Is(right)) { | 4505 } else if (result.Is(left) && !left.Is(right)) { |
| 4521 // Registers result and left alias, right is distinct: untag left into | 4506 // Registers result and left alias, right is distinct: untag left into |
| 4522 // result, and then multiply by right, giving a tagged result. | 4507 // result, and then multiply by right, giving a tagged result. |
| (...skipping 151 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 4674 __ JumpIfSmi(input, &load_smi); | 4659 __ JumpIfSmi(input, &load_smi); |
| 4675 | 4660 |
| 4676 Label convert_undefined; | 4661 Label convert_undefined; |
| 4677 | 4662 |
| 4678 // Heap number map check. | 4663 // Heap number map check. |
| 4679 __ Ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); | 4664 __ Ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); |
| 4680 if (can_convert_undefined_to_nan) { | 4665 if (can_convert_undefined_to_nan) { |
| 4681 __ JumpIfNotRoot(scratch, Heap::kHeapNumberMapRootIndex, | 4666 __ JumpIfNotRoot(scratch, Heap::kHeapNumberMapRootIndex, |
| 4682 &convert_undefined); | 4667 &convert_undefined); |
| 4683 } else { | 4668 } else { |
| 4684 DeoptimizeIfNotRoot(scratch, Heap::kHeapNumberMapRootIndex, | 4669 DeoptimizeIfNotRoot(scratch, Heap::kHeapNumberMapRootIndex, instr); |
| 4685 instr->environment()); | |
| 4686 } | 4670 } |
| 4687 | 4671 |
| 4688 // Load heap number. | 4672 // Load heap number. |
| 4689 __ Ldr(result, FieldMemOperand(input, HeapNumber::kValueOffset)); | 4673 __ Ldr(result, FieldMemOperand(input, HeapNumber::kValueOffset)); |
| 4690 if (instr->hydrogen()->deoptimize_on_minus_zero()) { | 4674 if (instr->hydrogen()->deoptimize_on_minus_zero()) { |
| 4691 DeoptimizeIfMinusZero(result, instr->environment()); | 4675 DeoptimizeIfMinusZero(result, instr); |
| 4692 } | 4676 } |
| 4693 __ B(&done); | 4677 __ B(&done); |
| 4694 | 4678 |
| 4695 if (can_convert_undefined_to_nan) { | 4679 if (can_convert_undefined_to_nan) { |
| 4696 __ Bind(&convert_undefined); | 4680 __ Bind(&convert_undefined); |
| 4697 DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, | 4681 DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr); |
| 4698 instr->environment()); | |
| 4699 | 4682 |
| 4700 __ LoadRoot(scratch, Heap::kNanValueRootIndex); | 4683 __ LoadRoot(scratch, Heap::kNanValueRootIndex); |
| 4701 __ Ldr(result, FieldMemOperand(scratch, HeapNumber::kValueOffset)); | 4684 __ Ldr(result, FieldMemOperand(scratch, HeapNumber::kValueOffset)); |
| 4702 __ B(&done); | 4685 __ B(&done); |
| 4703 } | 4686 } |
| 4704 | 4687 |
| 4705 } else { | 4688 } else { |
| 4706 DCHECK(mode == NUMBER_CANDIDATE_IS_SMI); | 4689 DCHECK(mode == NUMBER_CANDIDATE_IS_SMI); |
| 4707 // Fall through to load_smi. | 4690 // Fall through to load_smi. |
| 4708 } | 4691 } |
| (...skipping 172 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 4881 } | 4864 } |
| 4882 } | 4865 } |
| 4883 | 4866 |
| 4884 | 4867 |
| 4885 void LCodeGen::DoSmiTag(LSmiTag* instr) { | 4868 void LCodeGen::DoSmiTag(LSmiTag* instr) { |
| 4886 HChange* hchange = instr->hydrogen(); | 4869 HChange* hchange = instr->hydrogen(); |
| 4887 Register input = ToRegister(instr->value()); | 4870 Register input = ToRegister(instr->value()); |
| 4888 Register output = ToRegister(instr->result()); | 4871 Register output = ToRegister(instr->result()); |
| 4889 if (hchange->CheckFlag(HValue::kCanOverflow) && | 4872 if (hchange->CheckFlag(HValue::kCanOverflow) && |
| 4890 hchange->value()->CheckFlag(HValue::kUint32)) { | 4873 hchange->value()->CheckFlag(HValue::kUint32)) { |
| 4891 DeoptimizeIfNegative(input.W(), instr->environment()); | 4874 DeoptimizeIfNegative(input.W(), instr); |
| 4892 } | 4875 } |
| 4893 __ SmiTag(output, input); | 4876 __ SmiTag(output, input); |
| 4894 } | 4877 } |
| 4895 | 4878 |
| 4896 | 4879 |
| 4897 void LCodeGen::DoSmiUntag(LSmiUntag* instr) { | 4880 void LCodeGen::DoSmiUntag(LSmiUntag* instr) { |
| 4898 Register input = ToRegister(instr->value()); | 4881 Register input = ToRegister(instr->value()); |
| 4899 Register result = ToRegister(instr->result()); | 4882 Register result = ToRegister(instr->result()); |
| 4900 Label done, untag; | 4883 Label done, untag; |
| 4901 | 4884 |
| 4902 if (instr->needs_check()) { | 4885 if (instr->needs_check()) { |
| 4903 DeoptimizeIfNotSmi(input, instr->environment()); | 4886 DeoptimizeIfNotSmi(input, instr); |
| 4904 } | 4887 } |
| 4905 | 4888 |
| 4906 __ Bind(&untag); | 4889 __ Bind(&untag); |
| 4907 __ SmiUntag(result, input); | 4890 __ SmiUntag(result, input); |
| 4908 __ Bind(&done); | 4891 __ Bind(&done); |
| 4909 } | 4892 } |
| 4910 | 4893 |
| 4911 | 4894 |
| 4912 void LCodeGen::DoShiftI(LShiftI* instr) { | 4895 void LCodeGen::DoShiftI(LShiftI* instr) { |
| 4913 LOperand* right_op = instr->right(); | 4896 LOperand* right_op = instr->right(); |
| 4914 Register left = ToRegister32(instr->left()); | 4897 Register left = ToRegister32(instr->left()); |
| 4915 Register result = ToRegister32(instr->result()); | 4898 Register result = ToRegister32(instr->result()); |
| 4916 | 4899 |
| 4917 if (right_op->IsRegister()) { | 4900 if (right_op->IsRegister()) { |
| 4918 Register right = ToRegister32(instr->right()); | 4901 Register right = ToRegister32(instr->right()); |
| 4919 switch (instr->op()) { | 4902 switch (instr->op()) { |
| 4920 case Token::ROR: __ Ror(result, left, right); break; | 4903 case Token::ROR: __ Ror(result, left, right); break; |
| 4921 case Token::SAR: __ Asr(result, left, right); break; | 4904 case Token::SAR: __ Asr(result, left, right); break; |
| 4922 case Token::SHL: __ Lsl(result, left, right); break; | 4905 case Token::SHL: __ Lsl(result, left, right); break; |
| 4923 case Token::SHR: | 4906 case Token::SHR: |
| 4924 __ Lsr(result, left, right); | 4907 __ Lsr(result, left, right); |
| 4925 if (instr->can_deopt()) { | 4908 if (instr->can_deopt()) { |
| 4926 // If `left >>> right` >= 0x80000000, the result is not representable | 4909 // If `left >>> right` >= 0x80000000, the result is not representable |
| 4927 // in a signed 32-bit smi. | 4910 // in a signed 32-bit smi. |
| 4928 DeoptimizeIfNegative(result, instr->environment()); | 4911 DeoptimizeIfNegative(result, instr); |
| 4929 } | 4912 } |
| 4930 break; | 4913 break; |
| 4931 default: UNREACHABLE(); | 4914 default: UNREACHABLE(); |
| 4932 } | 4915 } |
| 4933 } else { | 4916 } else { |
| 4934 DCHECK(right_op->IsConstantOperand()); | 4917 DCHECK(right_op->IsConstantOperand()); |
| 4935 int shift_count = JSShiftAmountFromLConstant(right_op); | 4918 int shift_count = JSShiftAmountFromLConstant(right_op); |
| 4936 if (shift_count == 0) { | 4919 if (shift_count == 0) { |
| 4937 if ((instr->op() == Token::SHR) && instr->can_deopt()) { | 4920 if ((instr->op() == Token::SHR) && instr->can_deopt()) { |
| 4938 DeoptimizeIfNegative(left, instr->environment()); | 4921 DeoptimizeIfNegative(left, instr); |
| 4939 } | 4922 } |
| 4940 __ Mov(result, left, kDiscardForSameWReg); | 4923 __ Mov(result, left, kDiscardForSameWReg); |
| 4941 } else { | 4924 } else { |
| 4942 switch (instr->op()) { | 4925 switch (instr->op()) { |
| 4943 case Token::ROR: __ Ror(result, left, shift_count); break; | 4926 case Token::ROR: __ Ror(result, left, shift_count); break; |
| 4944 case Token::SAR: __ Asr(result, left, shift_count); break; | 4927 case Token::SAR: __ Asr(result, left, shift_count); break; |
| 4945 case Token::SHL: __ Lsl(result, left, shift_count); break; | 4928 case Token::SHL: __ Lsl(result, left, shift_count); break; |
| 4946 case Token::SHR: __ Lsr(result, left, shift_count); break; | 4929 case Token::SHR: __ Lsr(result, left, shift_count); break; |
| 4947 default: UNREACHABLE(); | 4930 default: UNREACHABLE(); |
| 4948 } | 4931 } |
| (...skipping 32 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 4981 break; | 4964 break; |
| 4982 case Token::SHL: | 4965 case Token::SHL: |
| 4983 __ Lsl(result, left, result); | 4966 __ Lsl(result, left, result); |
| 4984 break; | 4967 break; |
| 4985 case Token::SHR: | 4968 case Token::SHR: |
| 4986 __ Lsr(result, left, result); | 4969 __ Lsr(result, left, result); |
| 4987 __ Bic(result, result, kSmiShiftMask); | 4970 __ Bic(result, result, kSmiShiftMask); |
| 4988 if (instr->can_deopt()) { | 4971 if (instr->can_deopt()) { |
| 4989 // If `left >>> right` >= 0x80000000, the result is not representable | 4972 // If `left >>> right` >= 0x80000000, the result is not representable |
| 4990 // in a signed 32-bit smi. | 4973 // in a signed 32-bit smi. |
| 4991 DeoptimizeIfNegative(result, instr->environment()); | 4974 DeoptimizeIfNegative(result, instr); |
| 4992 } | 4975 } |
| 4993 break; | 4976 break; |
| 4994 default: UNREACHABLE(); | 4977 default: UNREACHABLE(); |
| 4995 } | 4978 } |
| 4996 } else { | 4979 } else { |
| 4997 DCHECK(right_op->IsConstantOperand()); | 4980 DCHECK(right_op->IsConstantOperand()); |
| 4998 int shift_count = JSShiftAmountFromLConstant(right_op); | 4981 int shift_count = JSShiftAmountFromLConstant(right_op); |
| 4999 if (shift_count == 0) { | 4982 if (shift_count == 0) { |
| 5000 if ((instr->op() == Token::SHR) && instr->can_deopt()) { | 4983 if ((instr->op() == Token::SHR) && instr->can_deopt()) { |
| 5001 DeoptimizeIfNegative(left, instr->environment()); | 4984 DeoptimizeIfNegative(left, instr); |
| 5002 } | 4985 } |
| 5003 __ Mov(result, left); | 4986 __ Mov(result, left); |
| 5004 } else { | 4987 } else { |
| 5005 switch (instr->op()) { | 4988 switch (instr->op()) { |
| 5006 case Token::ROR: | 4989 case Token::ROR: |
| 5007 __ SmiUntag(result, left); | 4990 __ SmiUntag(result, left); |
| 5008 __ Ror(result.W(), result.W(), shift_count); | 4991 __ Ror(result.W(), result.W(), shift_count); |
| 5009 __ SmiTag(result); | 4992 __ SmiTag(result); |
| 5010 break; | 4993 break; |
| 5011 case Token::SAR: | 4994 case Token::SAR: |
| (...skipping 108 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 5120 Register context = ToRegister(instr->context()); | 5103 Register context = ToRegister(instr->context()); |
| 5121 Register value = ToRegister(instr->value()); | 5104 Register value = ToRegister(instr->value()); |
| 5122 Register scratch = ToRegister(instr->temp()); | 5105 Register scratch = ToRegister(instr->temp()); |
| 5123 MemOperand target = ContextMemOperand(context, instr->slot_index()); | 5106 MemOperand target = ContextMemOperand(context, instr->slot_index()); |
| 5124 | 5107 |
| 5125 Label skip_assignment; | 5108 Label skip_assignment; |
| 5126 | 5109 |
| 5127 if (instr->hydrogen()->RequiresHoleCheck()) { | 5110 if (instr->hydrogen()->RequiresHoleCheck()) { |
| 5128 __ Ldr(scratch, target); | 5111 __ Ldr(scratch, target); |
| 5129 if (instr->hydrogen()->DeoptimizesOnHole()) { | 5112 if (instr->hydrogen()->DeoptimizesOnHole()) { |
| 5130 DeoptimizeIfRoot(scratch, Heap::kTheHoleValueRootIndex, | 5113 DeoptimizeIfRoot(scratch, Heap::kTheHoleValueRootIndex, instr); |
| 5131 instr->environment()); | |
| 5132 } else { | 5114 } else { |
| 5133 __ JumpIfNotRoot(scratch, Heap::kTheHoleValueRootIndex, &skip_assignment); | 5115 __ JumpIfNotRoot(scratch, Heap::kTheHoleValueRootIndex, &skip_assignment); |
| 5134 } | 5116 } |
| 5135 } | 5117 } |
| 5136 | 5118 |
| 5137 __ Str(value, target); | 5119 __ Str(value, target); |
| 5138 if (instr->hydrogen()->NeedsWriteBarrier()) { | 5120 if (instr->hydrogen()->NeedsWriteBarrier()) { |
| 5139 SmiCheck check_needed = | 5121 SmiCheck check_needed = |
| 5140 instr->hydrogen()->value()->type().IsHeapObject() | 5122 instr->hydrogen()->value()->type().IsHeapObject() |
| 5141 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; | 5123 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; |
| (...skipping 17 matching lines...) Expand all Loading... |
| 5159 // Load the cell. | 5141 // Load the cell. |
| 5160 __ Mov(cell, Operand(instr->hydrogen()->cell().handle())); | 5142 __ Mov(cell, Operand(instr->hydrogen()->cell().handle())); |
| 5161 | 5143 |
| 5162 // If the cell we are storing to contains the hole it could have | 5144 // If the cell we are storing to contains the hole it could have |
| 5163 // been deleted from the property dictionary. In that case, we need | 5145 // been deleted from the property dictionary. In that case, we need |
| 5164 // to update the property details in the property dictionary to mark | 5146 // to update the property details in the property dictionary to mark |
| 5165 // it as no longer deleted. We deoptimize in that case. | 5147 // it as no longer deleted. We deoptimize in that case. |
| 5166 if (instr->hydrogen()->RequiresHoleCheck()) { | 5148 if (instr->hydrogen()->RequiresHoleCheck()) { |
| 5167 Register payload = ToRegister(instr->temp2()); | 5149 Register payload = ToRegister(instr->temp2()); |
| 5168 __ Ldr(payload, FieldMemOperand(cell, Cell::kValueOffset)); | 5150 __ Ldr(payload, FieldMemOperand(cell, Cell::kValueOffset)); |
| 5169 DeoptimizeIfRoot( | 5151 DeoptimizeIfRoot(payload, Heap::kTheHoleValueRootIndex, instr); |
| 5170 payload, Heap::kTheHoleValueRootIndex, instr->environment()); | |
| 5171 } | 5152 } |
| 5172 | 5153 |
| 5173 // Store the value. | 5154 // Store the value. |
| 5174 __ Str(value, FieldMemOperand(cell, Cell::kValueOffset)); | 5155 __ Str(value, FieldMemOperand(cell, Cell::kValueOffset)); |
| 5175 // Cells are always rescanned, so no write barrier here. | 5156 // Cells are always rescanned, so no write barrier here. |
| 5176 } | 5157 } |
| 5177 | 5158 |
| 5178 | 5159 |
| 5179 void LCodeGen::DoStoreKeyedExternal(LStoreKeyedExternal* instr) { | 5160 void LCodeGen::DoStoreKeyedExternal(LStoreKeyedExternal* instr) { |
| 5180 Register ext_ptr = ToRegister(instr->elements()); | 5161 Register ext_ptr = ToRegister(instr->elements()); |
| (...skipping 396 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 5577 | 5558 |
| 5578 | 5559 |
| 5579 void LCodeGen::DoSubI(LSubI* instr) { | 5560 void LCodeGen::DoSubI(LSubI* instr) { |
| 5580 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); | 5561 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); |
| 5581 Register result = ToRegister32(instr->result()); | 5562 Register result = ToRegister32(instr->result()); |
| 5582 Register left = ToRegister32(instr->left()); | 5563 Register left = ToRegister32(instr->left()); |
| 5583 Operand right = ToShiftedRightOperand32(instr->right(), instr); | 5564 Operand right = ToShiftedRightOperand32(instr->right(), instr); |
| 5584 | 5565 |
| 5585 if (can_overflow) { | 5566 if (can_overflow) { |
| 5586 __ Subs(result, left, right); | 5567 __ Subs(result, left, right); |
| 5587 DeoptimizeIf(vs, instr->environment()); | 5568 DeoptimizeIf(vs, instr); |
| 5588 } else { | 5569 } else { |
| 5589 __ Sub(result, left, right); | 5570 __ Sub(result, left, right); |
| 5590 } | 5571 } |
| 5591 } | 5572 } |
| 5592 | 5573 |
| 5593 | 5574 |
| 5594 void LCodeGen::DoSubS(LSubS* instr) { | 5575 void LCodeGen::DoSubS(LSubS* instr) { |
| 5595 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); | 5576 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); |
| 5596 Register result = ToRegister(instr->result()); | 5577 Register result = ToRegister(instr->result()); |
| 5597 Register left = ToRegister(instr->left()); | 5578 Register left = ToRegister(instr->left()); |
| 5598 Operand right = ToOperand(instr->right()); | 5579 Operand right = ToOperand(instr->right()); |
| 5599 if (can_overflow) { | 5580 if (can_overflow) { |
| 5600 __ Subs(result, left, right); | 5581 __ Subs(result, left, right); |
| 5601 DeoptimizeIf(vs, instr->environment()); | 5582 DeoptimizeIf(vs, instr); |
| 5602 } else { | 5583 } else { |
| 5603 __ Sub(result, left, right); | 5584 __ Sub(result, left, right); |
| 5604 } | 5585 } |
| 5605 } | 5586 } |
| 5606 | 5587 |
| 5607 | 5588 |
| 5608 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, | 5589 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, |
| 5609 LOperand* value, | 5590 LOperand* value, |
| 5610 LOperand* temp1, | 5591 LOperand* temp1, |
| 5611 LOperand* temp2) { | 5592 LOperand* temp2) { |
| (...skipping 22 matching lines...) Expand all Loading... |
| 5634 Register true_root = output; | 5615 Register true_root = output; |
| 5635 Register false_root = scratch1; | 5616 Register false_root = scratch1; |
| 5636 __ LoadTrueFalseRoots(true_root, false_root); | 5617 __ LoadTrueFalseRoots(true_root, false_root); |
| 5637 __ Cmp(input, true_root); | 5618 __ Cmp(input, true_root); |
| 5638 __ Cset(output, eq); | 5619 __ Cset(output, eq); |
| 5639 __ Ccmp(input, false_root, ZFlag, ne); | 5620 __ Ccmp(input, false_root, ZFlag, ne); |
| 5640 __ B(eq, &done); | 5621 __ B(eq, &done); |
| 5641 | 5622 |
| 5642 // Output contains zero, undefined is converted to zero for truncating | 5623 // Output contains zero, undefined is converted to zero for truncating |
| 5643 // conversions. | 5624 // conversions. |
| 5644 DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, | 5625 DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr); |
| 5645 instr->environment()); | |
| 5646 } else { | 5626 } else { |
| 5647 Register output = ToRegister32(instr->result()); | 5627 Register output = ToRegister32(instr->result()); |
| 5648 DoubleRegister dbl_scratch2 = ToDoubleRegister(temp2); | 5628 DoubleRegister dbl_scratch2 = ToDoubleRegister(temp2); |
| 5649 | 5629 |
| 5650 __ RecordComment("Deferred TaggedToI: not a heap number"); | 5630 __ RecordComment("Deferred TaggedToI: not a heap number"); |
| 5651 DeoptimizeIfNotRoot(scratch1, Heap::kHeapNumberMapRootIndex, | 5631 DeoptimizeIfNotRoot(scratch1, Heap::kHeapNumberMapRootIndex, instr); |
| 5652 instr->environment()); | |
| 5653 | 5632 |
| 5654 // A heap number: load value and convert to int32 using non-truncating | 5633 // A heap number: load value and convert to int32 using non-truncating |
| 5655 // function. If the result is out of range, branch to deoptimize. | 5634 // function. If the result is out of range, branch to deoptimize. |
| 5656 __ Ldr(dbl_scratch1, FieldMemOperand(input, HeapNumber::kValueOffset)); | 5635 __ Ldr(dbl_scratch1, FieldMemOperand(input, HeapNumber::kValueOffset)); |
| 5657 __ TryRepresentDoubleAsInt32(output, dbl_scratch1, dbl_scratch2); | 5636 __ TryRepresentDoubleAsInt32(output, dbl_scratch1, dbl_scratch2); |
| 5658 __ RecordComment("Deferred TaggedToI: lost precision or NaN"); | 5637 __ RecordComment("Deferred TaggedToI: lost precision or NaN"); |
| 5659 DeoptimizeIf(ne, instr->environment()); | 5638 DeoptimizeIf(ne, instr); |
| 5660 | 5639 |
| 5661 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 5640 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 5662 __ Cmp(output, 0); | 5641 __ Cmp(output, 0); |
| 5663 __ B(ne, &done); | 5642 __ B(ne, &done); |
| 5664 __ Fmov(scratch1, dbl_scratch1); | 5643 __ Fmov(scratch1, dbl_scratch1); |
| 5665 __ RecordComment("Deferred TaggedToI: minus zero"); | 5644 __ RecordComment("Deferred TaggedToI: minus zero"); |
| 5666 DeoptimizeIfNegative(scratch1, instr->environment()); | 5645 DeoptimizeIfNegative(scratch1, instr); |
| 5667 } | 5646 } |
| 5668 } | 5647 } |
| 5669 __ Bind(&done); | 5648 __ Bind(&done); |
| 5670 } | 5649 } |
| 5671 | 5650 |
| 5672 | 5651 |
| 5673 void LCodeGen::DoTaggedToI(LTaggedToI* instr) { | 5652 void LCodeGen::DoTaggedToI(LTaggedToI* instr) { |
| 5674 class DeferredTaggedToI: public LDeferredCode { | 5653 class DeferredTaggedToI: public LDeferredCode { |
| 5675 public: | 5654 public: |
| 5676 DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr) | 5655 DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr) |
| (...skipping 120 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 5797 } | 5776 } |
| 5798 | 5777 |
| 5799 | 5778 |
| 5800 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) { | 5779 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) { |
| 5801 Register object = ToRegister(instr->object()); | 5780 Register object = ToRegister(instr->object()); |
| 5802 Register temp1 = ToRegister(instr->temp1()); | 5781 Register temp1 = ToRegister(instr->temp1()); |
| 5803 Register temp2 = ToRegister(instr->temp2()); | 5782 Register temp2 = ToRegister(instr->temp2()); |
| 5804 | 5783 |
| 5805 Label no_memento_found; | 5784 Label no_memento_found; |
| 5806 __ TestJSArrayForAllocationMemento(object, temp1, temp2, &no_memento_found); | 5785 __ TestJSArrayForAllocationMemento(object, temp1, temp2, &no_memento_found); |
| 5807 DeoptimizeIf(eq, instr->environment()); | 5786 DeoptimizeIf(eq, instr); |
| 5808 __ Bind(&no_memento_found); | 5787 __ Bind(&no_memento_found); |
| 5809 } | 5788 } |
| 5810 | 5789 |
| 5811 | 5790 |
| 5812 void LCodeGen::DoTruncateDoubleToIntOrSmi(LTruncateDoubleToIntOrSmi* instr) { | 5791 void LCodeGen::DoTruncateDoubleToIntOrSmi(LTruncateDoubleToIntOrSmi* instr) { |
| 5813 DoubleRegister input = ToDoubleRegister(instr->value()); | 5792 DoubleRegister input = ToDoubleRegister(instr->value()); |
| 5814 Register result = ToRegister(instr->result()); | 5793 Register result = ToRegister(instr->result()); |
| 5815 __ TruncateDoubleToI(result, input); | 5794 __ TruncateDoubleToI(result, input); |
| 5816 if (instr->tag_result()) { | 5795 if (instr->tag_result()) { |
| 5817 __ SmiTag(result, result); | 5796 __ SmiTag(result, result); |
| (...skipping 95 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 5913 __ Ucvtf(ToDoubleRegister(instr->result()), ToRegister32(instr->value())); | 5892 __ Ucvtf(ToDoubleRegister(instr->result()), ToRegister32(instr->value())); |
| 5914 } | 5893 } |
| 5915 | 5894 |
| 5916 | 5895 |
| 5917 void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) { | 5896 void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) { |
| 5918 Register object = ToRegister(instr->value()); | 5897 Register object = ToRegister(instr->value()); |
| 5919 Register map = ToRegister(instr->map()); | 5898 Register map = ToRegister(instr->map()); |
| 5920 Register temp = ToRegister(instr->temp()); | 5899 Register temp = ToRegister(instr->temp()); |
| 5921 __ Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset)); | 5900 __ Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset)); |
| 5922 __ Cmp(map, temp); | 5901 __ Cmp(map, temp); |
| 5923 DeoptimizeIf(ne, instr->environment()); | 5902 DeoptimizeIf(ne, instr); |
| 5924 } | 5903 } |
| 5925 | 5904 |
| 5926 | 5905 |
| 5927 void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) { | 5906 void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) { |
| 5928 Register receiver = ToRegister(instr->receiver()); | 5907 Register receiver = ToRegister(instr->receiver()); |
| 5929 Register function = ToRegister(instr->function()); | 5908 Register function = ToRegister(instr->function()); |
| 5930 Register result = ToRegister(instr->result()); | 5909 Register result = ToRegister(instr->result()); |
| 5931 | 5910 |
| 5932 // If the receiver is null or undefined, we have to pass the global object as | 5911 // If the receiver is null or undefined, we have to pass the global object as |
| 5933 // a receiver to normal functions. Values have to be passed unchanged to | 5912 // a receiver to normal functions. Values have to be passed unchanged to |
| (...skipping 13 matching lines...) Expand all Loading... |
| 5947 | 5926 |
| 5948 // Do not transform the receiver to object for builtins. | 5927 // Do not transform the receiver to object for builtins. |
| 5949 __ Tbnz(result, SharedFunctionInfo::kNative, ©_receiver); | 5928 __ Tbnz(result, SharedFunctionInfo::kNative, ©_receiver); |
| 5950 } | 5929 } |
| 5951 | 5930 |
| 5952 // Normal function. Replace undefined or null with global receiver. | 5931 // Normal function. Replace undefined or null with global receiver. |
| 5953 __ JumpIfRoot(receiver, Heap::kNullValueRootIndex, &global_object); | 5932 __ JumpIfRoot(receiver, Heap::kNullValueRootIndex, &global_object); |
| 5954 __ JumpIfRoot(receiver, Heap::kUndefinedValueRootIndex, &global_object); | 5933 __ JumpIfRoot(receiver, Heap::kUndefinedValueRootIndex, &global_object); |
| 5955 | 5934 |
| 5956 // Deoptimize if the receiver is not a JS object. | 5935 // Deoptimize if the receiver is not a JS object. |
| 5957 DeoptimizeIfSmi(receiver, instr->environment()); | 5936 DeoptimizeIfSmi(receiver, instr); |
| 5958 __ CompareObjectType(receiver, result, result, FIRST_SPEC_OBJECT_TYPE); | 5937 __ CompareObjectType(receiver, result, result, FIRST_SPEC_OBJECT_TYPE); |
| 5959 __ B(ge, ©_receiver); | 5938 __ B(ge, ©_receiver); |
| 5960 Deoptimize(instr->environment()); | 5939 Deoptimize(instr); |
| 5961 | 5940 |
| 5962 __ Bind(&global_object); | 5941 __ Bind(&global_object); |
| 5963 __ Ldr(result, FieldMemOperand(function, JSFunction::kContextOffset)); | 5942 __ Ldr(result, FieldMemOperand(function, JSFunction::kContextOffset)); |
| 5964 __ Ldr(result, ContextMemOperand(result, Context::GLOBAL_OBJECT_INDEX)); | 5943 __ Ldr(result, ContextMemOperand(result, Context::GLOBAL_OBJECT_INDEX)); |
| 5965 __ Ldr(result, FieldMemOperand(result, GlobalObject::kGlobalProxyOffset)); | 5944 __ Ldr(result, FieldMemOperand(result, GlobalObject::kGlobalProxyOffset)); |
| 5966 __ B(&done); | 5945 __ B(&done); |
| 5967 | 5946 |
| 5968 __ Bind(©_receiver); | 5947 __ Bind(©_receiver); |
| 5969 __ Mov(result, receiver); | 5948 __ Mov(result, receiver); |
| 5970 __ Bind(&done); | 5949 __ Bind(&done); |
| (...skipping 85 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 6056 Handle<ScopeInfo> scope_info = instr->scope_info(); | 6035 Handle<ScopeInfo> scope_info = instr->scope_info(); |
| 6057 __ Push(scope_info); | 6036 __ Push(scope_info); |
| 6058 __ Push(ToRegister(instr->function())); | 6037 __ Push(ToRegister(instr->function())); |
| 6059 CallRuntime(Runtime::kPushBlockContext, 2, instr); | 6038 CallRuntime(Runtime::kPushBlockContext, 2, instr); |
| 6060 RecordSafepoint(Safepoint::kNoLazyDeopt); | 6039 RecordSafepoint(Safepoint::kNoLazyDeopt); |
| 6061 } | 6040 } |
| 6062 | 6041 |
| 6063 | 6042 |
| 6064 | 6043 |
| 6065 } } // namespace v8::internal | 6044 } } // namespace v8::internal |
| OLD | NEW |