OLD | NEW |
1 // Copyright 2013 the V8 project authors. All rights reserved. | 1 // Copyright 2013 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/v8.h" | 5 #include "src/v8.h" |
6 | 6 |
7 #include "src/arm64/lithium-codegen-arm64.h" | 7 #include "src/arm64/lithium-codegen-arm64.h" |
8 #include "src/arm64/lithium-gap-resolver-arm64.h" | 8 #include "src/arm64/lithium-gap-resolver-arm64.h" |
9 #include "src/base/bits.h" | 9 #include "src/base/bits.h" |
10 #include "src/code-factory.h" | 10 #include "src/code-factory.h" |
(...skipping 987 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
998 | 998 |
999 for (int i = 0, length = inlined_closures->length(); i < length; i++) { | 999 for (int i = 0, length = inlined_closures->length(); i < length; i++) { |
1000 DefineDeoptimizationLiteral(inlined_closures->at(i)); | 1000 DefineDeoptimizationLiteral(inlined_closures->at(i)); |
1001 } | 1001 } |
1002 | 1002 |
1003 inlined_function_count_ = deoptimization_literals_.length(); | 1003 inlined_function_count_ = deoptimization_literals_.length(); |
1004 } | 1004 } |
1005 | 1005 |
1006 | 1006 |
1007 void LCodeGen::DeoptimizeBranch( | 1007 void LCodeGen::DeoptimizeBranch( |
1008 LInstruction* instr, const char* detail, BranchType branch_type, | 1008 LInstruction* instr, Deoptimizer::DeoptReason deopt_reason, |
1009 Register reg, int bit, Deoptimizer::BailoutType* override_bailout_type) { | 1009 BranchType branch_type, Register reg, int bit, |
| 1010 Deoptimizer::BailoutType* override_bailout_type) { |
1010 LEnvironment* environment = instr->environment(); | 1011 LEnvironment* environment = instr->environment(); |
1011 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); | 1012 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); |
1012 Deoptimizer::BailoutType bailout_type = | 1013 Deoptimizer::BailoutType bailout_type = |
1013 info()->IsStub() ? Deoptimizer::LAZY : Deoptimizer::EAGER; | 1014 info()->IsStub() ? Deoptimizer::LAZY : Deoptimizer::EAGER; |
1014 | 1015 |
1015 if (override_bailout_type != NULL) { | 1016 if (override_bailout_type != NULL) { |
1016 bailout_type = *override_bailout_type; | 1017 bailout_type = *override_bailout_type; |
1017 } | 1018 } |
1018 | 1019 |
1019 DCHECK(environment->HasBeenRegistered()); | 1020 DCHECK(environment->HasBeenRegistered()); |
(...skipping 30 matching lines...) Expand all Loading... |
1050 } | 1051 } |
1051 | 1052 |
1052 if (info()->ShouldTrapOnDeopt()) { | 1053 if (info()->ShouldTrapOnDeopt()) { |
1053 Label dont_trap; | 1054 Label dont_trap; |
1054 __ B(&dont_trap, InvertBranchType(branch_type), reg, bit); | 1055 __ B(&dont_trap, InvertBranchType(branch_type), reg, bit); |
1055 __ Debug("trap_on_deopt", __LINE__, BREAK); | 1056 __ Debug("trap_on_deopt", __LINE__, BREAK); |
1056 __ Bind(&dont_trap); | 1057 __ Bind(&dont_trap); |
1057 } | 1058 } |
1058 | 1059 |
1059 Deoptimizer::Reason reason(instr->hydrogen_value()->position().raw(), | 1060 Deoptimizer::Reason reason(instr->hydrogen_value()->position().raw(), |
1060 instr->Mnemonic(), detail); | 1061 instr->Mnemonic(), deopt_reason); |
1061 DCHECK(info()->IsStub() || frame_is_built_); | 1062 DCHECK(info()->IsStub() || frame_is_built_); |
1062 // Go through jump table if we need to build frame, or restore caller doubles. | 1063 // Go through jump table if we need to build frame, or restore caller doubles. |
1063 if (branch_type == always && | 1064 if (branch_type == always && |
1064 frame_is_built_ && !info()->saves_caller_doubles()) { | 1065 frame_is_built_ && !info()->saves_caller_doubles()) { |
1065 DeoptComment(reason); | 1066 DeoptComment(reason); |
1066 __ Call(entry, RelocInfo::RUNTIME_ENTRY); | 1067 __ Call(entry, RelocInfo::RUNTIME_ENTRY); |
1067 } else { | 1068 } else { |
1068 Deoptimizer::JumpTableEntry* table_entry = | 1069 Deoptimizer::JumpTableEntry* table_entry = |
1069 new (zone()) Deoptimizer::JumpTableEntry(entry, reason, bailout_type, | 1070 new (zone()) Deoptimizer::JumpTableEntry(entry, reason, bailout_type, |
1070 !frame_is_built_); | 1071 !frame_is_built_); |
1071 // We often have several deopts to the same entry, reuse the last | 1072 // We often have several deopts to the same entry, reuse the last |
1072 // jump entry if this is the case. | 1073 // jump entry if this is the case. |
1073 if (jump_table_.is_empty() || | 1074 if (jump_table_.is_empty() || |
1074 !table_entry->IsEquivalentTo(*jump_table_.last())) { | 1075 !table_entry->IsEquivalentTo(*jump_table_.last())) { |
1075 jump_table_.Add(table_entry, zone()); | 1076 jump_table_.Add(table_entry, zone()); |
1076 } | 1077 } |
1077 __ B(&jump_table_.last()->label, branch_type, reg, bit); | 1078 __ B(&jump_table_.last()->label, branch_type, reg, bit); |
1078 } | 1079 } |
1079 } | 1080 } |
1080 | 1081 |
1081 | 1082 |
1082 void LCodeGen::Deoptimize(LInstruction* instr, const char* detail, | 1083 void LCodeGen::Deoptimize(LInstruction* instr, |
| 1084 Deoptimizer::DeoptReason deopt_reason, |
1083 Deoptimizer::BailoutType* override_bailout_type) { | 1085 Deoptimizer::BailoutType* override_bailout_type) { |
1084 DeoptimizeBranch(instr, detail, always, NoReg, -1, override_bailout_type); | 1086 DeoptimizeBranch(instr, deopt_reason, always, NoReg, -1, |
| 1087 override_bailout_type); |
1085 } | 1088 } |
1086 | 1089 |
1087 | 1090 |
1088 void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr, | 1091 void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr, |
1089 const char* detail) { | 1092 Deoptimizer::DeoptReason deopt_reason) { |
1090 DeoptimizeBranch(instr, detail, static_cast<BranchType>(cond)); | 1093 DeoptimizeBranch(instr, deopt_reason, static_cast<BranchType>(cond)); |
1091 } | 1094 } |
1092 | 1095 |
1093 | 1096 |
1094 void LCodeGen::DeoptimizeIfZero(Register rt, LInstruction* instr, | 1097 void LCodeGen::DeoptimizeIfZero(Register rt, LInstruction* instr, |
1095 const char* detail) { | 1098 Deoptimizer::DeoptReason deopt_reason) { |
1096 DeoptimizeBranch(instr, detail, reg_zero, rt); | 1099 DeoptimizeBranch(instr, deopt_reason, reg_zero, rt); |
1097 } | 1100 } |
1098 | 1101 |
1099 | 1102 |
1100 void LCodeGen::DeoptimizeIfNotZero(Register rt, LInstruction* instr, | 1103 void LCodeGen::DeoptimizeIfNotZero(Register rt, LInstruction* instr, |
1101 const char* detail) { | 1104 Deoptimizer::DeoptReason deopt_reason) { |
1102 DeoptimizeBranch(instr, detail, reg_not_zero, rt); | 1105 DeoptimizeBranch(instr, deopt_reason, reg_not_zero, rt); |
1103 } | 1106 } |
1104 | 1107 |
1105 | 1108 |
1106 void LCodeGen::DeoptimizeIfNegative(Register rt, LInstruction* instr, | 1109 void LCodeGen::DeoptimizeIfNegative(Register rt, LInstruction* instr, |
1107 const char* detail) { | 1110 Deoptimizer::DeoptReason deopt_reason) { |
1108 int sign_bit = rt.Is64Bits() ? kXSignBit : kWSignBit; | 1111 int sign_bit = rt.Is64Bits() ? kXSignBit : kWSignBit; |
1109 DeoptimizeIfBitSet(rt, sign_bit, instr, detail); | 1112 DeoptimizeIfBitSet(rt, sign_bit, instr, deopt_reason); |
1110 } | 1113 } |
1111 | 1114 |
1112 | 1115 |
1113 void LCodeGen::DeoptimizeIfSmi(Register rt, LInstruction* instr, | 1116 void LCodeGen::DeoptimizeIfSmi(Register rt, LInstruction* instr, |
1114 const char* detail) { | 1117 Deoptimizer::DeoptReason deopt_reason) { |
1115 DeoptimizeIfBitClear(rt, MaskToBit(kSmiTagMask), instr, detail); | 1118 DeoptimizeIfBitClear(rt, MaskToBit(kSmiTagMask), instr, deopt_reason); |
1116 } | 1119 } |
1117 | 1120 |
1118 | 1121 |
1119 void LCodeGen::DeoptimizeIfNotSmi(Register rt, LInstruction* instr, | 1122 void LCodeGen::DeoptimizeIfNotSmi(Register rt, LInstruction* instr, |
1120 const char* detail) { | 1123 Deoptimizer::DeoptReason deopt_reason) { |
1121 DeoptimizeIfBitSet(rt, MaskToBit(kSmiTagMask), instr, detail); | 1124 DeoptimizeIfBitSet(rt, MaskToBit(kSmiTagMask), instr, deopt_reason); |
1122 } | 1125 } |
1123 | 1126 |
1124 | 1127 |
1125 void LCodeGen::DeoptimizeIfRoot(Register rt, Heap::RootListIndex index, | 1128 void LCodeGen::DeoptimizeIfRoot(Register rt, Heap::RootListIndex index, |
1126 LInstruction* instr, const char* detail) { | 1129 LInstruction* instr, |
| 1130 Deoptimizer::DeoptReason deopt_reason) { |
1127 __ CompareRoot(rt, index); | 1131 __ CompareRoot(rt, index); |
1128 DeoptimizeIf(eq, instr, detail); | 1132 DeoptimizeIf(eq, instr, deopt_reason); |
1129 } | 1133 } |
1130 | 1134 |
1131 | 1135 |
1132 void LCodeGen::DeoptimizeIfNotRoot(Register rt, Heap::RootListIndex index, | 1136 void LCodeGen::DeoptimizeIfNotRoot(Register rt, Heap::RootListIndex index, |
1133 LInstruction* instr, const char* detail) { | 1137 LInstruction* instr, |
| 1138 Deoptimizer::DeoptReason deopt_reason) { |
1134 __ CompareRoot(rt, index); | 1139 __ CompareRoot(rt, index); |
1135 DeoptimizeIf(ne, instr, detail); | 1140 DeoptimizeIf(ne, instr, deopt_reason); |
1136 } | 1141 } |
1137 | 1142 |
1138 | 1143 |
1139 void LCodeGen::DeoptimizeIfMinusZero(DoubleRegister input, LInstruction* instr, | 1144 void LCodeGen::DeoptimizeIfMinusZero(DoubleRegister input, LInstruction* instr, |
1140 const char* detail) { | 1145 Deoptimizer::DeoptReason deopt_reason) { |
1141 __ TestForMinusZero(input); | 1146 __ TestForMinusZero(input); |
1142 DeoptimizeIf(vs, instr, detail); | 1147 DeoptimizeIf(vs, instr, deopt_reason); |
1143 } | 1148 } |
1144 | 1149 |
1145 | 1150 |
1146 void LCodeGen::DeoptimizeIfNotHeapNumber(Register object, LInstruction* instr) { | 1151 void LCodeGen::DeoptimizeIfNotHeapNumber(Register object, LInstruction* instr) { |
1147 __ CompareObjectMap(object, Heap::kHeapNumberMapRootIndex); | 1152 __ CompareObjectMap(object, Heap::kHeapNumberMapRootIndex); |
1148 DeoptimizeIf(ne, instr, "not heap number"); | 1153 DeoptimizeIf(ne, instr, Deoptimizer::kNotHeapNumber); |
1149 } | 1154 } |
1150 | 1155 |
1151 | 1156 |
1152 void LCodeGen::DeoptimizeIfBitSet(Register rt, int bit, LInstruction* instr, | 1157 void LCodeGen::DeoptimizeIfBitSet(Register rt, int bit, LInstruction* instr, |
1153 const char* detail) { | 1158 Deoptimizer::DeoptReason deopt_reason) { |
1154 DeoptimizeBranch(instr, detail, reg_bit_set, rt, bit); | 1159 DeoptimizeBranch(instr, deopt_reason, reg_bit_set, rt, bit); |
1155 } | 1160 } |
1156 | 1161 |
1157 | 1162 |
1158 void LCodeGen::DeoptimizeIfBitClear(Register rt, int bit, LInstruction* instr, | 1163 void LCodeGen::DeoptimizeIfBitClear(Register rt, int bit, LInstruction* instr, |
1159 const char* detail) { | 1164 Deoptimizer::DeoptReason deopt_reason) { |
1160 DeoptimizeBranch(instr, detail, reg_bit_clear, rt, bit); | 1165 DeoptimizeBranch(instr, deopt_reason, reg_bit_clear, rt, bit); |
1161 } | 1166 } |
1162 | 1167 |
1163 | 1168 |
1164 void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) { | 1169 void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) { |
1165 if (!info()->IsStub()) { | 1170 if (!info()->IsStub()) { |
1166 // Ensure that we have enough space after the previous lazy-bailout | 1171 // Ensure that we have enough space after the previous lazy-bailout |
1167 // instruction for patching the code here. | 1172 // instruction for patching the code here. |
1168 intptr_t current_pc = masm()->pc_offset(); | 1173 intptr_t current_pc = masm()->pc_offset(); |
1169 | 1174 |
1170 if (current_pc < (last_lazy_deopt_pc_ + space_needed)) { | 1175 if (current_pc < (last_lazy_deopt_pc_ + space_needed)) { |
(...skipping 353 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1524 | 1529 |
1525 | 1530 |
1526 void LCodeGen::DoAddI(LAddI* instr) { | 1531 void LCodeGen::DoAddI(LAddI* instr) { |
1527 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); | 1532 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); |
1528 Register result = ToRegister32(instr->result()); | 1533 Register result = ToRegister32(instr->result()); |
1529 Register left = ToRegister32(instr->left()); | 1534 Register left = ToRegister32(instr->left()); |
1530 Operand right = ToShiftedRightOperand32(instr->right(), instr); | 1535 Operand right = ToShiftedRightOperand32(instr->right(), instr); |
1531 | 1536 |
1532 if (can_overflow) { | 1537 if (can_overflow) { |
1533 __ Adds(result, left, right); | 1538 __ Adds(result, left, right); |
1534 DeoptimizeIf(vs, instr, "overflow"); | 1539 DeoptimizeIf(vs, instr, Deoptimizer::kOverflow); |
1535 } else { | 1540 } else { |
1536 __ Add(result, left, right); | 1541 __ Add(result, left, right); |
1537 } | 1542 } |
1538 } | 1543 } |
1539 | 1544 |
1540 | 1545 |
1541 void LCodeGen::DoAddS(LAddS* instr) { | 1546 void LCodeGen::DoAddS(LAddS* instr) { |
1542 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); | 1547 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); |
1543 Register result = ToRegister(instr->result()); | 1548 Register result = ToRegister(instr->result()); |
1544 Register left = ToRegister(instr->left()); | 1549 Register left = ToRegister(instr->left()); |
1545 Operand right = ToOperand(instr->right()); | 1550 Operand right = ToOperand(instr->right()); |
1546 if (can_overflow) { | 1551 if (can_overflow) { |
1547 __ Adds(result, left, right); | 1552 __ Adds(result, left, right); |
1548 DeoptimizeIf(vs, instr, "overflow"); | 1553 DeoptimizeIf(vs, instr, Deoptimizer::kOverflow); |
1549 } else { | 1554 } else { |
1550 __ Add(result, left, right); | 1555 __ Add(result, left, right); |
1551 } | 1556 } |
1552 } | 1557 } |
1553 | 1558 |
1554 | 1559 |
1555 void LCodeGen::DoAllocate(LAllocate* instr) { | 1560 void LCodeGen::DoAllocate(LAllocate* instr) { |
1556 class DeferredAllocate: public LDeferredCode { | 1561 class DeferredAllocate: public LDeferredCode { |
1557 public: | 1562 public: |
1558 DeferredAllocate(LCodeGen* codegen, LAllocate* instr) | 1563 DeferredAllocate(LCodeGen* codegen, LAllocate* instr) |
(...skipping 105 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1664 Register scratch = x5; | 1669 Register scratch = x5; |
1665 DCHECK(receiver.Is(x0)); // Used for parameter count. | 1670 DCHECK(receiver.Is(x0)); // Used for parameter count. |
1666 DCHECK(function.Is(x1)); // Required by InvokeFunction. | 1671 DCHECK(function.Is(x1)); // Required by InvokeFunction. |
1667 DCHECK(ToRegister(instr->result()).Is(x0)); | 1672 DCHECK(ToRegister(instr->result()).Is(x0)); |
1668 DCHECK(instr->IsMarkedAsCall()); | 1673 DCHECK(instr->IsMarkedAsCall()); |
1669 | 1674 |
1670 // Copy the arguments to this function possibly from the | 1675 // Copy the arguments to this function possibly from the |
1671 // adaptor frame below it. | 1676 // adaptor frame below it. |
1672 const uint32_t kArgumentsLimit = 1 * KB; | 1677 const uint32_t kArgumentsLimit = 1 * KB; |
1673 __ Cmp(length, kArgumentsLimit); | 1678 __ Cmp(length, kArgumentsLimit); |
1674 DeoptimizeIf(hi, instr, "too many arguments"); | 1679 DeoptimizeIf(hi, instr, Deoptimizer::kTooManyArguments); |
1675 | 1680 |
1676 // Push the receiver and use the register to keep the original | 1681 // Push the receiver and use the register to keep the original |
1677 // number of arguments. | 1682 // number of arguments. |
1678 __ Push(receiver); | 1683 __ Push(receiver); |
1679 Register argc = receiver; | 1684 Register argc = receiver; |
1680 receiver = NoReg; | 1685 receiver = NoReg; |
1681 __ Sxtw(argc, length); | 1686 __ Sxtw(argc, length); |
1682 // The arguments are at a one pointer size offset from elements. | 1687 // The arguments are at a one pointer size offset from elements. |
1683 __ Add(elements, elements, 1 * kPointerSize); | 1688 __ Add(elements, elements, 1 * kPointerSize); |
1684 | 1689 |
(...skipping 160 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1845 __ Cmp(length, index); | 1850 __ Cmp(length, index); |
1846 cond = CommuteCondition(cond); | 1851 cond = CommuteCondition(cond); |
1847 } else { | 1852 } else { |
1848 Register index = ToRegister32(instr->index()); | 1853 Register index = ToRegister32(instr->index()); |
1849 Operand length = ToOperand32(instr->length()); | 1854 Operand length = ToOperand32(instr->length()); |
1850 __ Cmp(index, length); | 1855 __ Cmp(index, length); |
1851 } | 1856 } |
1852 if (FLAG_debug_code && instr->hydrogen()->skip_check()) { | 1857 if (FLAG_debug_code && instr->hydrogen()->skip_check()) { |
1853 __ Assert(NegateCondition(cond), kEliminatedBoundsCheckFailed); | 1858 __ Assert(NegateCondition(cond), kEliminatedBoundsCheckFailed); |
1854 } else { | 1859 } else { |
1855 DeoptimizeIf(cond, instr, "out of bounds"); | 1860 DeoptimizeIf(cond, instr, Deoptimizer::kOutOfBounds); |
1856 } | 1861 } |
1857 } | 1862 } |
1858 | 1863 |
1859 | 1864 |
1860 void LCodeGen::DoBranch(LBranch* instr) { | 1865 void LCodeGen::DoBranch(LBranch* instr) { |
1861 Representation r = instr->hydrogen()->value()->representation(); | 1866 Representation r = instr->hydrogen()->value()->representation(); |
1862 Label* true_label = instr->TrueLabel(chunk_); | 1867 Label* true_label = instr->TrueLabel(chunk_); |
1863 Label* false_label = instr->FalseLabel(chunk_); | 1868 Label* false_label = instr->FalseLabel(chunk_); |
1864 | 1869 |
1865 if (r.IsInteger32()) { | 1870 if (r.IsInteger32()) { |
(...skipping 58 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1924 value, Heap::kNullValueRootIndex, false_label); | 1929 value, Heap::kNullValueRootIndex, false_label); |
1925 } | 1930 } |
1926 | 1931 |
1927 if (expected.Contains(ToBooleanStub::SMI)) { | 1932 if (expected.Contains(ToBooleanStub::SMI)) { |
1928 // Smis: 0 -> false, all other -> true. | 1933 // Smis: 0 -> false, all other -> true. |
1929 DCHECK(Smi::FromInt(0) == 0); | 1934 DCHECK(Smi::FromInt(0) == 0); |
1930 __ Cbz(value, false_label); | 1935 __ Cbz(value, false_label); |
1931 __ JumpIfSmi(value, true_label); | 1936 __ JumpIfSmi(value, true_label); |
1932 } else if (expected.NeedsMap()) { | 1937 } else if (expected.NeedsMap()) { |
1933 // If we need a map later and have a smi, deopt. | 1938 // If we need a map later and have a smi, deopt. |
1934 DeoptimizeIfSmi(value, instr, "Smi"); | 1939 DeoptimizeIfSmi(value, instr, Deoptimizer::kSmi); |
1935 } | 1940 } |
1936 | 1941 |
1937 Register map = NoReg; | 1942 Register map = NoReg; |
1938 Register scratch = NoReg; | 1943 Register scratch = NoReg; |
1939 | 1944 |
1940 if (expected.NeedsMap()) { | 1945 if (expected.NeedsMap()) { |
1941 DCHECK((instr->temp1() != NULL) && (instr->temp2() != NULL)); | 1946 DCHECK((instr->temp1() != NULL) && (instr->temp2() != NULL)); |
1942 map = ToRegister(instr->temp1()); | 1947 map = ToRegister(instr->temp1()); |
1943 scratch = ToRegister(instr->temp2()); | 1948 scratch = ToRegister(instr->temp2()); |
1944 | 1949 |
(...skipping 40 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1985 // If we got a NaN (overflow bit is set), jump to the false branch. | 1990 // If we got a NaN (overflow bit is set), jump to the false branch. |
1986 __ B(vs, false_label); | 1991 __ B(vs, false_label); |
1987 __ B(eq, false_label); | 1992 __ B(eq, false_label); |
1988 __ B(true_label); | 1993 __ B(true_label); |
1989 __ Bind(¬_heap_number); | 1994 __ Bind(¬_heap_number); |
1990 } | 1995 } |
1991 | 1996 |
1992 if (!expected.IsGeneric()) { | 1997 if (!expected.IsGeneric()) { |
1993 // We've seen something for the first time -> deopt. | 1998 // We've seen something for the first time -> deopt. |
1994 // This can only happen if we are not generic already. | 1999 // This can only happen if we are not generic already. |
1995 Deoptimize(instr, "unexpected object"); | 2000 Deoptimize(instr, Deoptimizer::kUnexpectedObject); |
1996 } | 2001 } |
1997 } | 2002 } |
1998 } | 2003 } |
1999 } | 2004 } |
2000 | 2005 |
2001 | 2006 |
2002 void LCodeGen::CallKnownFunction(Handle<JSFunction> function, | 2007 void LCodeGen::CallKnownFunction(Handle<JSFunction> function, |
2003 int formal_parameter_count, int arity, | 2008 int formal_parameter_count, int arity, |
2004 LInstruction* instr) { | 2009 LInstruction* instr) { |
2005 bool dont_adapt_arguments = | 2010 bool dont_adapt_arguments = |
(...skipping 185 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2191 Register temp = ToRegister(instr->temp()); | 2196 Register temp = ToRegister(instr->temp()); |
2192 { | 2197 { |
2193 PushSafepointRegistersScope scope(this); | 2198 PushSafepointRegistersScope scope(this); |
2194 __ Push(object); | 2199 __ Push(object); |
2195 __ Mov(cp, 0); | 2200 __ Mov(cp, 0); |
2196 __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance); | 2201 __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance); |
2197 RecordSafepointWithRegisters( | 2202 RecordSafepointWithRegisters( |
2198 instr->pointer_map(), 1, Safepoint::kNoLazyDeopt); | 2203 instr->pointer_map(), 1, Safepoint::kNoLazyDeopt); |
2199 __ StoreToSafepointRegisterSlot(x0, temp); | 2204 __ StoreToSafepointRegisterSlot(x0, temp); |
2200 } | 2205 } |
2201 DeoptimizeIfSmi(temp, instr, "instance migration failed"); | 2206 DeoptimizeIfSmi(temp, instr, Deoptimizer::kInstanceMigrationFailed); |
2202 } | 2207 } |
2203 | 2208 |
2204 | 2209 |
2205 void LCodeGen::DoCheckMaps(LCheckMaps* instr) { | 2210 void LCodeGen::DoCheckMaps(LCheckMaps* instr) { |
2206 class DeferredCheckMaps: public LDeferredCode { | 2211 class DeferredCheckMaps: public LDeferredCode { |
2207 public: | 2212 public: |
2208 DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object) | 2213 DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object) |
2209 : LDeferredCode(codegen), instr_(instr), object_(object) { | 2214 : LDeferredCode(codegen), instr_(instr), object_(object) { |
2210 SetExit(check_maps()); | 2215 SetExit(check_maps()); |
2211 } | 2216 } |
(...skipping 34 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2246 __ CompareMap(map_reg, map); | 2251 __ CompareMap(map_reg, map); |
2247 __ B(eq, &success); | 2252 __ B(eq, &success); |
2248 } | 2253 } |
2249 Handle<Map> map = maps->at(maps->size() - 1).handle(); | 2254 Handle<Map> map = maps->at(maps->size() - 1).handle(); |
2250 __ CompareMap(map_reg, map); | 2255 __ CompareMap(map_reg, map); |
2251 | 2256 |
2252 // We didn't match a map. | 2257 // We didn't match a map. |
2253 if (instr->hydrogen()->HasMigrationTarget()) { | 2258 if (instr->hydrogen()->HasMigrationTarget()) { |
2254 __ B(ne, deferred->entry()); | 2259 __ B(ne, deferred->entry()); |
2255 } else { | 2260 } else { |
2256 DeoptimizeIf(ne, instr, "wrong map"); | 2261 DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap); |
2257 } | 2262 } |
2258 | 2263 |
2259 __ Bind(&success); | 2264 __ Bind(&success); |
2260 } | 2265 } |
2261 | 2266 |
2262 | 2267 |
2263 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) { | 2268 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) { |
2264 if (!instr->hydrogen()->value()->type().IsHeapObject()) { | 2269 if (!instr->hydrogen()->value()->type().IsHeapObject()) { |
2265 DeoptimizeIfSmi(ToRegister(instr->value()), instr, "Smi"); | 2270 DeoptimizeIfSmi(ToRegister(instr->value()), instr, Deoptimizer::kSmi); |
2266 } | 2271 } |
2267 } | 2272 } |
2268 | 2273 |
2269 | 2274 |
2270 void LCodeGen::DoCheckSmi(LCheckSmi* instr) { | 2275 void LCodeGen::DoCheckSmi(LCheckSmi* instr) { |
2271 Register value = ToRegister(instr->value()); | 2276 Register value = ToRegister(instr->value()); |
2272 DCHECK(!instr->result() || ToRegister(instr->result()).Is(value)); | 2277 DCHECK(!instr->result() || ToRegister(instr->result()).Is(value)); |
2273 DeoptimizeIfNotSmi(value, instr, "not a Smi"); | 2278 DeoptimizeIfNotSmi(value, instr, Deoptimizer::kNotASmi); |
2274 } | 2279 } |
2275 | 2280 |
2276 | 2281 |
2277 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) { | 2282 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) { |
2278 Register input = ToRegister(instr->value()); | 2283 Register input = ToRegister(instr->value()); |
2279 Register scratch = ToRegister(instr->temp()); | 2284 Register scratch = ToRegister(instr->temp()); |
2280 | 2285 |
2281 __ Ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); | 2286 __ Ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); |
2282 __ Ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); | 2287 __ Ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); |
2283 | 2288 |
2284 if (instr->hydrogen()->is_interval_check()) { | 2289 if (instr->hydrogen()->is_interval_check()) { |
2285 InstanceType first, last; | 2290 InstanceType first, last; |
2286 instr->hydrogen()->GetCheckInterval(&first, &last); | 2291 instr->hydrogen()->GetCheckInterval(&first, &last); |
2287 | 2292 |
2288 __ Cmp(scratch, first); | 2293 __ Cmp(scratch, first); |
2289 if (first == last) { | 2294 if (first == last) { |
2290 // If there is only one type in the interval check for equality. | 2295 // If there is only one type in the interval check for equality. |
2291 DeoptimizeIf(ne, instr, "wrong instance type"); | 2296 DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType); |
2292 } else if (last == LAST_TYPE) { | 2297 } else if (last == LAST_TYPE) { |
2293 // We don't need to compare with the higher bound of the interval. | 2298 // We don't need to compare with the higher bound of the interval. |
2294 DeoptimizeIf(lo, instr, "wrong instance type"); | 2299 DeoptimizeIf(lo, instr, Deoptimizer::kWrongInstanceType); |
2295 } else { | 2300 } else { |
2296 // If we are below the lower bound, set the C flag and clear the Z flag | 2301 // If we are below the lower bound, set the C flag and clear the Z flag |
2297 // to force a deopt. | 2302 // to force a deopt. |
2298 __ Ccmp(scratch, last, CFlag, hs); | 2303 __ Ccmp(scratch, last, CFlag, hs); |
2299 DeoptimizeIf(hi, instr, "wrong instance type"); | 2304 DeoptimizeIf(hi, instr, Deoptimizer::kWrongInstanceType); |
2300 } | 2305 } |
2301 } else { | 2306 } else { |
2302 uint8_t mask; | 2307 uint8_t mask; |
2303 uint8_t tag; | 2308 uint8_t tag; |
2304 instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag); | 2309 instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag); |
2305 | 2310 |
2306 if (base::bits::IsPowerOfTwo32(mask)) { | 2311 if (base::bits::IsPowerOfTwo32(mask)) { |
2307 DCHECK((tag == 0) || (tag == mask)); | 2312 DCHECK((tag == 0) || (tag == mask)); |
2308 if (tag == 0) { | 2313 if (tag == 0) { |
2309 DeoptimizeIfBitSet(scratch, MaskToBit(mask), instr, | 2314 DeoptimizeIfBitSet(scratch, MaskToBit(mask), instr, |
2310 "wrong instance type"); | 2315 Deoptimizer::kWrongInstanceType); |
2311 } else { | 2316 } else { |
2312 DeoptimizeIfBitClear(scratch, MaskToBit(mask), instr, | 2317 DeoptimizeIfBitClear(scratch, MaskToBit(mask), instr, |
2313 "wrong instance type"); | 2318 Deoptimizer::kWrongInstanceType); |
2314 } | 2319 } |
2315 } else { | 2320 } else { |
2316 if (tag == 0) { | 2321 if (tag == 0) { |
2317 __ Tst(scratch, mask); | 2322 __ Tst(scratch, mask); |
2318 } else { | 2323 } else { |
2319 __ And(scratch, scratch, mask); | 2324 __ And(scratch, scratch, mask); |
2320 __ Cmp(scratch, tag); | 2325 __ Cmp(scratch, tag); |
2321 } | 2326 } |
2322 DeoptimizeIf(ne, instr, "wrong instance type"); | 2327 DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType); |
2323 } | 2328 } |
2324 } | 2329 } |
2325 } | 2330 } |
2326 | 2331 |
2327 | 2332 |
2328 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { | 2333 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { |
2329 DoubleRegister input = ToDoubleRegister(instr->unclamped()); | 2334 DoubleRegister input = ToDoubleRegister(instr->unclamped()); |
2330 Register result = ToRegister32(instr->result()); | 2335 Register result = ToRegister32(instr->result()); |
2331 __ ClampDoubleToUint8(result, input, double_scratch()); | 2336 __ ClampDoubleToUint8(result, input, double_scratch()); |
2332 } | 2337 } |
(...skipping 19 matching lines...) Expand all Loading... |
2352 __ B(&done); | 2357 __ B(&done); |
2353 | 2358 |
2354 __ Bind(&is_not_smi); | 2359 __ Bind(&is_not_smi); |
2355 | 2360 |
2356 // Check for heap number. | 2361 // Check for heap number. |
2357 Label is_heap_number; | 2362 Label is_heap_number; |
2358 __ JumpIfHeapNumber(input, &is_heap_number); | 2363 __ JumpIfHeapNumber(input, &is_heap_number); |
2359 | 2364 |
2360 // Check for undefined. Undefined is coverted to zero for clamping conversion. | 2365 // Check for undefined. Undefined is coverted to zero for clamping conversion. |
2361 DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr, | 2366 DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr, |
2362 "not a heap number/undefined"); | 2367 Deoptimizer::kNotAHeapNumberUndefined); |
2363 __ Mov(result, 0); | 2368 __ Mov(result, 0); |
2364 __ B(&done); | 2369 __ B(&done); |
2365 | 2370 |
2366 // Heap number case. | 2371 // Heap number case. |
2367 __ Bind(&is_heap_number); | 2372 __ Bind(&is_heap_number); |
2368 DoubleRegister dbl_scratch = double_scratch(); | 2373 DoubleRegister dbl_scratch = double_scratch(); |
2369 DoubleRegister dbl_scratch2 = ToDoubleRegister(instr->temp1()); | 2374 DoubleRegister dbl_scratch2 = ToDoubleRegister(instr->temp1()); |
2370 __ Ldr(dbl_scratch, FieldMemOperand(input, HeapNumber::kValueOffset)); | 2375 __ Ldr(dbl_scratch, FieldMemOperand(input, HeapNumber::kValueOffset)); |
2371 __ ClampDoubleToUint8(result, dbl_scratch, dbl_scratch2); | 2376 __ ClampDoubleToUint8(result, dbl_scratch, dbl_scratch2); |
2372 | 2377 |
(...skipping 284 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2657 if (isolate()->heap()->InNewSpace(*object)) { | 2662 if (isolate()->heap()->InNewSpace(*object)) { |
2658 UseScratchRegisterScope temps(masm()); | 2663 UseScratchRegisterScope temps(masm()); |
2659 Register temp = temps.AcquireX(); | 2664 Register temp = temps.AcquireX(); |
2660 Handle<Cell> cell = isolate()->factory()->NewCell(object); | 2665 Handle<Cell> cell = isolate()->factory()->NewCell(object); |
2661 __ Mov(temp, Operand(Handle<Object>(cell))); | 2666 __ Mov(temp, Operand(Handle<Object>(cell))); |
2662 __ Ldr(temp, FieldMemOperand(temp, Cell::kValueOffset)); | 2667 __ Ldr(temp, FieldMemOperand(temp, Cell::kValueOffset)); |
2663 __ Cmp(reg, temp); | 2668 __ Cmp(reg, temp); |
2664 } else { | 2669 } else { |
2665 __ Cmp(reg, Operand(object)); | 2670 __ Cmp(reg, Operand(object)); |
2666 } | 2671 } |
2667 DeoptimizeIf(ne, instr, "value mismatch"); | 2672 DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch); |
2668 } | 2673 } |
2669 | 2674 |
2670 | 2675 |
2671 void LCodeGen::DoLazyBailout(LLazyBailout* instr) { | 2676 void LCodeGen::DoLazyBailout(LLazyBailout* instr) { |
2672 last_lazy_deopt_pc_ = masm()->pc_offset(); | 2677 last_lazy_deopt_pc_ = masm()->pc_offset(); |
2673 DCHECK(instr->HasEnvironment()); | 2678 DCHECK(instr->HasEnvironment()); |
2674 LEnvironment* env = instr->environment(); | 2679 LEnvironment* env = instr->environment(); |
2675 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt); | 2680 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt); |
2676 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index()); | 2681 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index()); |
2677 } | 2682 } |
2678 | 2683 |
2679 | 2684 |
2680 void LCodeGen::DoDateField(LDateField* instr) { | 2685 void LCodeGen::DoDateField(LDateField* instr) { |
2681 Register object = ToRegister(instr->date()); | 2686 Register object = ToRegister(instr->date()); |
2682 Register result = ToRegister(instr->result()); | 2687 Register result = ToRegister(instr->result()); |
2683 Register temp1 = x10; | 2688 Register temp1 = x10; |
2684 Register temp2 = x11; | 2689 Register temp2 = x11; |
2685 Smi* index = instr->index(); | 2690 Smi* index = instr->index(); |
2686 Label runtime, done; | 2691 Label runtime, done; |
2687 | 2692 |
2688 DCHECK(object.is(result) && object.Is(x0)); | 2693 DCHECK(object.is(result) && object.Is(x0)); |
2689 DCHECK(instr->IsMarkedAsCall()); | 2694 DCHECK(instr->IsMarkedAsCall()); |
2690 | 2695 |
2691 DeoptimizeIfSmi(object, instr, "Smi"); | 2696 DeoptimizeIfSmi(object, instr, Deoptimizer::kSmi); |
2692 __ CompareObjectType(object, temp1, temp1, JS_DATE_TYPE); | 2697 __ CompareObjectType(object, temp1, temp1, JS_DATE_TYPE); |
2693 DeoptimizeIf(ne, instr, "not a date object"); | 2698 DeoptimizeIf(ne, instr, Deoptimizer::kNotADateObject); |
2694 | 2699 |
2695 if (index->value() == 0) { | 2700 if (index->value() == 0) { |
2696 __ Ldr(result, FieldMemOperand(object, JSDate::kValueOffset)); | 2701 __ Ldr(result, FieldMemOperand(object, JSDate::kValueOffset)); |
2697 } else { | 2702 } else { |
2698 if (index->value() < JSDate::kFirstUncachedField) { | 2703 if (index->value() < JSDate::kFirstUncachedField) { |
2699 ExternalReference stamp = ExternalReference::date_cache_stamp(isolate()); | 2704 ExternalReference stamp = ExternalReference::date_cache_stamp(isolate()); |
2700 __ Mov(temp1, Operand(stamp)); | 2705 __ Mov(temp1, Operand(stamp)); |
2701 __ Ldr(temp1, MemOperand(temp1)); | 2706 __ Ldr(temp1, MemOperand(temp1)); |
2702 __ Ldr(temp2, FieldMemOperand(object, JSDate::kCacheStampOffset)); | 2707 __ Ldr(temp2, FieldMemOperand(object, JSDate::kCacheStampOffset)); |
2703 __ Cmp(temp1, temp2); | 2708 __ Cmp(temp1, temp2); |
(...skipping 29 matching lines...) Expand all Loading... |
// Emits truncating int32 division of a register dividend by a constant
// power-of-two divisor, deoptimizing on -0, kMinInt/-1 overflow, or a
// non-zero remainder when the result must be exact.
void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
  Register dividend = ToRegister32(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister32(instr->result());
  DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
  DCHECK(!result.is(dividend));

  // Check for (0 / -x) that will produce negative zero.
  HDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    DeoptimizeIfZero(dividend, instr, Deoptimizer::kDivisionByZero);
  }
  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
    // Test dividend for kMinInt by subtracting one (cmp) and checking for
    // overflow.
    __ Cmp(dividend, 1);
    DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
  }
  // Deoptimize if remainder will not be 0.
  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
      divisor != 1 && divisor != -1) {
    // For |divisor| == 2^k the remainder is the low k bits of the dividend.
    int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
    __ Tst(dividend, mask);
    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision);
  }

  if (divisor == -1) {  // Nice shortcut, not needed for correctness.
    __ Neg(result, dividend);
    return;
  }
  int32_t shift = WhichPowerOf2Abs(divisor);
  if (shift == 0) {
    __ Mov(result, dividend);
  } else if (shift == 1) {
    // Truncating divide by 2: add the sign bit so negative dividends round
    // toward zero before the arithmetic shift below.
    __ Add(result, dividend, Operand(dividend, LSR, 31));
  } else {
    // General case: result = sign mask (0 or -1); adding its top `shift`
    // bits biases negative dividends by 2^shift - 1 so ASR truncates
    // toward zero instead of toward -infinity.
    __ Mov(result, Operand(dividend, ASR, 31));
    __ Add(result, dividend, Operand(result, LSR, 32 - shift));
  }
  if (shift > 0) __ Mov(result, Operand(result, ASR, shift));
  if (divisor < 0) __ Neg(result, result);
}
2776 | 2781 |
2777 | 2782 |
// Emits int32 division by an arbitrary constant using multiply-shift
// (TruncatingDiv). Deoptimizes on divisor 0, on -0, and — when the result
// must be exact — on a non-zero remainder.
void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
  Register dividend = ToRegister32(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister32(instr->result());
  DCHECK(!AreAliased(dividend, result));

  if (divisor == 0) {
    // Division by the constant zero always deopts; no code to emit.
    Deoptimize(instr, Deoptimizer::kDivisionByZero);
    return;
  }

  // Check for (0 / -x) that will produce negative zero.
  HDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    DeoptimizeIfZero(dividend, instr, Deoptimizer::kMinusZero);
  }

  // Divide by |divisor| and fix the sign afterwards.
  __ TruncatingDiv(result, dividend, Abs(divisor));
  if (divisor < 0) __ Neg(result, result);

  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
    // Exactness check: temp = dividend - result * divisor must be zero.
    Register temp = ToRegister32(instr->temp());
    DCHECK(!AreAliased(dividend, result, temp));
    __ Sxtw(dividend.X(), dividend);
    __ Mov(temp, divisor);
    __ Smsubl(temp.X(), result, temp, dividend.X());
    DeoptimizeIfNotZero(temp, instr, Deoptimizer::kLostPrecision);
  }
}
2807 | 2812 |
2808 | 2813 |
// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
// Emits register-register int32 division (Sdiv), then guards the cases that
// require deoptimization: x/0, 0/-x (negative zero), kMinInt/-1 overflow,
// and an inexact result when all uses are not truncating.
void LCodeGen::DoDivI(LDivI* instr) {
  HBinaryOperation* hdiv = instr->hydrogen();
  Register dividend = ToRegister32(instr->dividend());
  Register divisor = ToRegister32(instr->divisor());
  Register result = ToRegister32(instr->result());

  // Issue the division first, and then check for any deopt cases whilst the
  // result is computed.
  __ Sdiv(result, dividend, divisor);

  if (hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
    // Truncating uses accept Sdiv's hardware semantics (x/0 == 0,
    // kMinInt/-1 == kMinInt), so no checks and no temp are needed.
    DCHECK(!instr->temp());
    return;
  }

  // Check for x / 0.
  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
    DeoptimizeIfZero(divisor, instr, Deoptimizer::kDivisionByZero);
  }

  // Check for (0 / -x) as that will produce negative zero.
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
    __ Cmp(divisor, 0);

    // If the divisor < 0 (mi), compare the dividend, and deopt if it is
    // zero, ie. zero dividend with negative divisor deopts.
    // If the divisor >= 0 (pl, the opposite of mi) set the flags to
    // condition ne, so we don't deopt, ie. positive divisor doesn't deopt.
    __ Ccmp(dividend, 0, NoFlag, mi);
    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
  }

  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow)) {
    // Test dividend for kMinInt by subtracting one (cmp) and checking for
    // overflow.
    __ Cmp(dividend, 1);
    // If overflow is set, ie. dividend = kMinInt, compare the divisor with
    // -1. If overflow is clear, set the flags for condition ne, as the
    // dividend isn't -1, and thus we shouldn't deopt.
    __ Ccmp(divisor, -1, NoFlag, vs);
    DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
  }

  // Compute remainder and deopt if it's not zero.
  Register remainder = ToRegister32(instr->temp());
  __ Msub(remainder, result, divisor, dividend);
  DeoptimizeIfNotZero(remainder, instr, Deoptimizer::kLostPrecision);
}
2859 | 2864 |
2860 | 2865 |
// Converts a double to an int32 (optionally Smi-tagged), deoptimizing when
// the value is -0 (if required), NaN, or not exactly representable.
void LCodeGen::DoDoubleToIntOrSmi(LDoubleToIntOrSmi* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  Register result = ToRegister32(instr->result());

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    DeoptimizeIfMinusZero(input, instr, Deoptimizer::kMinusZero);
  }

  // Sets flags: ne if the double could not be represented exactly as int32
  // (out of range, fractional, or NaN).
  __ TryRepresentDoubleAsInt32(result, input, double_scratch());
  DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);

  if (instr->tag_result()) {
    // Tag in the full X register; the 32-bit value shifts into the upper
    // word to form a Smi.
    __ SmiTag(result.X());
  }
}
2876 | 2881 |
2877 | 2882 |
// Pops instr->count() entries off the stack without reading them.
void LCodeGen::DoDrop(LDrop* instr) {
  __ Drop(instr->count());
}
(...skipping 40 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2921 __ EnumLengthUntagged(result, map); | 2926 __ EnumLengthUntagged(result, map); |
2922 __ Cbnz(result, &load_cache); | 2927 __ Cbnz(result, &load_cache); |
2923 | 2928 |
2924 __ Mov(result, Operand(isolate()->factory()->empty_fixed_array())); | 2929 __ Mov(result, Operand(isolate()->factory()->empty_fixed_array())); |
2925 __ B(&done); | 2930 __ B(&done); |
2926 | 2931 |
2927 __ Bind(&load_cache); | 2932 __ Bind(&load_cache); |
2928 __ LoadInstanceDescriptors(map, result); | 2933 __ LoadInstanceDescriptors(map, result); |
2929 __ Ldr(result, FieldMemOperand(result, DescriptorArray::kEnumCacheOffset)); | 2934 __ Ldr(result, FieldMemOperand(result, DescriptorArray::kEnumCacheOffset)); |
2930 __ Ldr(result, FieldMemOperand(result, FixedArray::SizeFor(instr->idx()))); | 2935 __ Ldr(result, FieldMemOperand(result, FixedArray::SizeFor(instr->idx()))); |
2931 DeoptimizeIfZero(result, instr, "no cache"); | 2936 DeoptimizeIfZero(result, instr, Deoptimizer::kNoCache); |
2932 | 2937 |
2933 __ Bind(&done); | 2938 __ Bind(&done); |
2934 } | 2939 } |
2935 | 2940 |
2936 | 2941 |
// Prepares the map/enum cache for a for-in loop over `object` (in x0).
// Deoptimizes for undefined, null, Smis and non-JS objects; otherwise
// produces either the object's map (cache usable) or the runtime-computed
// property-name FixedArray.
void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
  Register object = ToRegister(instr->object());
  Register null_value = x5;

  DCHECK(instr->IsMarkedAsCall());
  DCHECK(object.Is(x0));

  // for-in over undefined/null/Smi has no enumerable properties in
  // optimized code; fall back to the full codegen via deopt.
  DeoptimizeIfRoot(object, Heap::kUndefinedValueRootIndex, instr,
                   Deoptimizer::kUndefined);

  __ LoadRoot(null_value, Heap::kNullValueRootIndex);
  __ Cmp(object, null_value);
  DeoptimizeIf(eq, instr, Deoptimizer::kNull);

  DeoptimizeIfSmi(object, instr, Deoptimizer::kSmi);

  // Proxies (and anything below the first spec-object type) take the
  // generic path.
  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
  __ CompareObjectType(object, x1, x1, LAST_JS_PROXY_TYPE);
  DeoptimizeIf(le, instr, Deoptimizer::kNotAJavaScriptObject);

  Label use_cache, call_runtime;
  // Walks the prototype chain; branches to call_runtime if any map lacks a
  // valid enum cache.
  __ CheckEnumCache(object, null_value, x1, x2, x3, x4, &call_runtime);

  // Cache hit: the result is the object's map.
  __ Ldr(object, FieldMemOperand(object, HeapObject::kMapOffset));
  __ B(&use_cache);

  // Get the set of properties to enumerate.
  __ Bind(&call_runtime);
  __ Push(object);
  CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);

  // The runtime must have returned a FixedArray of names (map == meta map);
  // anything else means the fast path assumptions are broken.
  __ Ldr(x1, FieldMemOperand(object, HeapObject::kMapOffset));
  DeoptimizeIfNotRoot(x1, Heap::kMetaMapRootIndex, instr,
                      Deoptimizer::kWrongMap);

  __ Bind(&use_cache);
}
2972 | 2979 |
2973 | 2980 |
2974 void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) { | 2981 void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) { |
2975 Register input = ToRegister(instr->value()); | 2982 Register input = ToRegister(instr->value()); |
2976 Register result = ToRegister(instr->result()); | 2983 Register result = ToRegister(instr->result()); |
2977 | 2984 |
2978 __ AssertString(input); | 2985 __ AssertString(input); |
(...skipping 370 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
3349 DoGap(label); | 3356 DoGap(label); |
3350 } | 3357 } |
3351 | 3358 |
3352 | 3359 |
// Loads a slot from a Context. Depending on the hydrogen instruction, a
// hole value either deoptimizes or is replaced by undefined.
void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
  Register context = ToRegister(instr->context());
  Register result = ToRegister(instr->result());
  __ Ldr(result, ContextMemOperand(context, instr->slot_index()));
  if (instr->hydrogen()->RequiresHoleCheck()) {
    if (instr->hydrogen()->DeoptimizesOnHole()) {
      // Reading an uninitialized (hole) binding is an error case handled by
      // unoptimized code.
      DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr,
                       Deoptimizer::kHole);
    } else {
      // Non-deopting variant: a hole reads as undefined.
      Label not_the_hole;
      __ JumpIfNotRoot(result, Heap::kTheHoleValueRootIndex, &not_the_hole);
      __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
      __ Bind(&not_the_hole);
    }
  }
}
3368 | 3376 |
3369 | 3377 |
// Loads the prototype of a JSFunction. The field may hold either the
// prototype itself or an initial map (whose Map::kPrototypeOffset then
// holds the prototype); a hole deoptimizes.
void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
  Register function = ToRegister(instr->function());
  Register result = ToRegister(instr->result());
  Register temp = ToRegister(instr->temp());

  // Get the prototype or initial map from the function.
  __ Ldr(result, FieldMemOperand(function,
                                 JSFunction::kPrototypeOrInitialMapOffset));

  // Check that the function has a prototype or an initial map.
  DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr,
                   Deoptimizer::kHole);

  // If the function does not have an initial map, we're done.
  Label done;
  __ CompareObjectType(result, temp, temp, MAP_TYPE);
  __ B(ne, &done);

  // Get the prototype from the initial map.
  __ Ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));

  // All done.
  __ Bind(&done);
}
3393 | 3402 |
3394 | 3403 |
// Loads a global variable from its property cell, deoptimizing if the cell
// holds the hole (deleted / uninitialized global) when a check is required.
void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
  Register result = ToRegister(instr->result());
  // The cell handle is embedded in the code object; load its value field.
  __ Mov(result, Operand(Handle<Object>(instr->hydrogen()->cell().handle())));
  __ Ldr(result, FieldMemOperand(result, Cell::kValueOffset));
  if (instr->hydrogen()->RequiresHoleCheck()) {
    DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr,
                     Deoptimizer::kHole);
  }
}
3403 | 3413 |
3404 | 3414 |
3405 template <class T> | 3415 template <class T> |
3406 void LCodeGen::EmitVectorLoadICRegisters(T* instr) { | 3416 void LCodeGen::EmitVectorLoadICRegisters(T* instr) { |
3407 DCHECK(FLAG_vector_ics); | 3417 DCHECK(FLAG_vector_ics); |
3408 Register vector_register = ToRegister(instr->temp_vector()); | 3418 Register vector_register = ToRegister(instr->temp_vector()); |
3409 Register slot_register = VectorLoadICDescriptor::SlotRegister(); | 3419 Register slot_register = VectorLoadICDescriptor::SlotRegister(); |
3410 DCHECK(vector_register.is(VectorLoadICDescriptor::VectorRegister())); | 3420 DCHECK(vector_register.is(VectorLoadICDescriptor::VectorRegister())); |
(...skipping 115 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
3526 case EXTERNAL_INT32_ELEMENTS: | 3536 case EXTERNAL_INT32_ELEMENTS: |
3527 case INT32_ELEMENTS: | 3537 case INT32_ELEMENTS: |
3528 __ Ldrsw(result, mem_op); | 3538 __ Ldrsw(result, mem_op); |
3529 break; | 3539 break; |
3530 case EXTERNAL_UINT32_ELEMENTS: | 3540 case EXTERNAL_UINT32_ELEMENTS: |
3531 case UINT32_ELEMENTS: | 3541 case UINT32_ELEMENTS: |
3532 __ Ldr(result.W(), mem_op); | 3542 __ Ldr(result.W(), mem_op); |
3533 if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) { | 3543 if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) { |
3534 // Deopt if value > 0x80000000. | 3544 // Deopt if value > 0x80000000. |
3535 __ Tst(result, 0xFFFFFFFF80000000); | 3545 __ Tst(result, 0xFFFFFFFF80000000); |
3536 DeoptimizeIf(ne, instr, "negative value"); | 3546 DeoptimizeIf(ne, instr, Deoptimizer::kNegativeValue); |
3537 } | 3547 } |
3538 break; | 3548 break; |
3539 case FLOAT32_ELEMENTS: | 3549 case FLOAT32_ELEMENTS: |
3540 case FLOAT64_ELEMENTS: | 3550 case FLOAT64_ELEMENTS: |
3541 case EXTERNAL_FLOAT32_ELEMENTS: | 3551 case EXTERNAL_FLOAT32_ELEMENTS: |
3542 case EXTERNAL_FLOAT64_ELEMENTS: | 3552 case EXTERNAL_FLOAT64_ELEMENTS: |
3543 case FAST_HOLEY_DOUBLE_ELEMENTS: | 3553 case FAST_HOLEY_DOUBLE_ELEMENTS: |
3544 case FAST_HOLEY_ELEMENTS: | 3554 case FAST_HOLEY_ELEMENTS: |
3545 case FAST_HOLEY_SMI_ELEMENTS: | 3555 case FAST_HOLEY_SMI_ELEMENTS: |
3546 case FAST_DOUBLE_ELEMENTS: | 3556 case FAST_DOUBLE_ELEMENTS: |
(...skipping 73 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
3620 instr->hydrogen()->representation(), | 3630 instr->hydrogen()->representation(), |
3621 instr->base_offset()); | 3631 instr->base_offset()); |
3622 } | 3632 } |
3623 | 3633 |
3624 __ Ldr(result, mem_op); | 3634 __ Ldr(result, mem_op); |
3625 | 3635 |
3626 if (instr->hydrogen()->RequiresHoleCheck()) { | 3636 if (instr->hydrogen()->RequiresHoleCheck()) { |
3627 Register scratch = ToRegister(instr->temp()); | 3637 Register scratch = ToRegister(instr->temp()); |
3628 __ Fmov(scratch, result); | 3638 __ Fmov(scratch, result); |
3629 __ Eor(scratch, scratch, kHoleNanInt64); | 3639 __ Eor(scratch, scratch, kHoleNanInt64); |
3630 DeoptimizeIfZero(scratch, instr, "hole"); | 3640 DeoptimizeIfZero(scratch, instr, Deoptimizer::kHole); |
3631 } | 3641 } |
3632 } | 3642 } |
3633 | 3643 |
3634 | 3644 |
3635 void LCodeGen::DoLoadKeyedFixed(LLoadKeyedFixed* instr) { | 3645 void LCodeGen::DoLoadKeyedFixed(LLoadKeyedFixed* instr) { |
3636 Register elements = ToRegister(instr->elements()); | 3646 Register elements = ToRegister(instr->elements()); |
3637 Register result = ToRegister(instr->result()); | 3647 Register result = ToRegister(instr->result()); |
3638 MemOperand mem_op; | 3648 MemOperand mem_op; |
3639 | 3649 |
3640 Representation representation = instr->hydrogen()->representation(); | 3650 Representation representation = instr->hydrogen()->representation(); |
(...skipping 17 matching lines...) Expand all Loading... |
3658 | 3668 |
3659 mem_op = PrepareKeyedArrayOperand(load_base, elements, key, key_is_tagged, | 3669 mem_op = PrepareKeyedArrayOperand(load_base, elements, key, key_is_tagged, |
3660 instr->hydrogen()->elements_kind(), | 3670 instr->hydrogen()->elements_kind(), |
3661 representation, instr->base_offset()); | 3671 representation, instr->base_offset()); |
3662 } | 3672 } |
3663 | 3673 |
3664 __ Load(result, mem_op, representation); | 3674 __ Load(result, mem_op, representation); |
3665 | 3675 |
3666 if (instr->hydrogen()->RequiresHoleCheck()) { | 3676 if (instr->hydrogen()->RequiresHoleCheck()) { |
3667 if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) { | 3677 if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) { |
3668 DeoptimizeIfNotSmi(result, instr, "not a Smi"); | 3678 DeoptimizeIfNotSmi(result, instr, Deoptimizer::kNotASmi); |
3669 } else { | 3679 } else { |
3670 DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr, "hole"); | 3680 DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr, |
| 3681 Deoptimizer::kHole); |
3671 } | 3682 } |
3672 } | 3683 } |
3673 } | 3684 } |
3674 | 3685 |
3675 | 3686 |
3676 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) { | 3687 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) { |
3677 DCHECK(ToRegister(instr->context()).is(cp)); | 3688 DCHECK(ToRegister(instr->context()).is(cp)); |
3678 DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister())); | 3689 DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister())); |
3679 DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister())); | 3690 DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister())); |
3680 if (FLAG_vector_ics) { | 3691 if (FLAG_vector_ics) { |
(...skipping 83 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
3764 if (r.IsDouble()) { | 3775 if (r.IsDouble()) { |
3765 DoubleRegister input = ToDoubleRegister(instr->value()); | 3776 DoubleRegister input = ToDoubleRegister(instr->value()); |
3766 DoubleRegister result = ToDoubleRegister(instr->result()); | 3777 DoubleRegister result = ToDoubleRegister(instr->result()); |
3767 __ Fabs(result, input); | 3778 __ Fabs(result, input); |
3768 } else if (r.IsSmi() || r.IsInteger32()) { | 3779 } else if (r.IsSmi() || r.IsInteger32()) { |
3769 Register input = r.IsSmi() ? ToRegister(instr->value()) | 3780 Register input = r.IsSmi() ? ToRegister(instr->value()) |
3770 : ToRegister32(instr->value()); | 3781 : ToRegister32(instr->value()); |
3771 Register result = r.IsSmi() ? ToRegister(instr->result()) | 3782 Register result = r.IsSmi() ? ToRegister(instr->result()) |
3772 : ToRegister32(instr->result()); | 3783 : ToRegister32(instr->result()); |
3773 __ Abs(result, input); | 3784 __ Abs(result, input); |
3774 DeoptimizeIf(vs, instr, "overflow"); | 3785 DeoptimizeIf(vs, instr, Deoptimizer::kOverflow); |
3775 } | 3786 } |
3776 } | 3787 } |
3777 | 3788 |
3778 | 3789 |
3779 void LCodeGen::DoDeferredMathAbsTagged(LMathAbsTagged* instr, | 3790 void LCodeGen::DoDeferredMathAbsTagged(LMathAbsTagged* instr, |
3780 Label* exit, | 3791 Label* exit, |
3781 Label* allocation_entry) { | 3792 Label* allocation_entry) { |
3782 // Handle the tricky cases of MathAbsTagged: | 3793 // Handle the tricky cases of MathAbsTagged: |
3783 // - HeapNumber inputs. | 3794 // - HeapNumber inputs. |
3784 // - Negative inputs produce a positive result, so a new HeapNumber is | 3795 // - Negative inputs produce a positive result, so a new HeapNumber is |
(...skipping 131 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
3916 | 3927 |
3917 __ Frintm(result, input); | 3928 __ Frintm(result, input); |
3918 } | 3929 } |
3919 | 3930 |
3920 | 3931 |
// Math.floor to an int32 result: converts with round-toward-minus-infinity
// (Fcvtms) and deoptimizes on -0 (if required), NaN, or a result outside
// the int32 range.
void LCodeGen::DoMathFloorI(LMathFloorI* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  Register result = ToRegister(instr->result());

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    DeoptimizeIfMinusZero(input, instr, Deoptimizer::kMinusZero);
  }

  // Convert with rounding toward minus infinity into the 64-bit result.
  __ Fcvtms(result, input);

  // Check that the result fits into a 32-bit integer.
  //  - The result did not overflow.
  __ Cmp(result, Operand(result, SXTW));
  //  - The input was not NaN.
  // (Fccmp only performs the NaN self-compare when the range check above
  // passed; otherwise it forces the ne condition.)
  __ Fccmp(input, input, NoFlag, eq);
  DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
}
3938 | 3949 |
3939 | 3950 |
// Flooring int32 division by a constant power-of-two divisor (used for
// Math.floor(x / 2^k)), deoptimizing on -0 and on kMinInt/-1 overflow.
void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
  Register dividend = ToRegister32(instr->dividend());
  Register result = ToRegister32(instr->result());
  int32_t divisor = instr->divisor();

  // If the divisor is 1, return the dividend.
  if (divisor == 1) {
    __ Mov(result, dividend, kDiscardForSameWReg);
    return;
  }

  // If the divisor is positive, things are easy: There can be no deopts and we
  // can simply do an arithmetic right shift.
  int32_t shift = WhichPowerOf2Abs(divisor);
  if (divisor > 1) {
    __ Mov(result, Operand(dividend, ASR, shift));
    return;
  }

  // If the divisor is negative, we have to negate and handle edge cases.
  // Negs sets Z for a zero dividend and V for kMinInt.
  __ Negs(result, dividend);
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
  }

  // Dividing by -1 is basically negation, unless we overflow.
  if (divisor == -1) {
    if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
      DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
    }
    return;
  }

  // If the negation could not overflow, simply shifting is OK.
  // NOTE(review): this path shifts `dividend`, while the overflow-aware path
  // below shifts the negated value held in `result` — the two are only
  // equivalent if the register allocator aliased `result` with `dividend`
  // (the kDiscardForSameWReg use above suggests that may be assumed).
  // Confirm `dividend` rather than `result` is intended here.
  if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
    __ Mov(result, Operand(dividend, ASR, shift));
    return;
  }

  // Shift the negated dividend; on overflow (dividend == kMinInt, V set by
  // Negs) select the exact constant kMinInt / divisor instead.
  __ Asr(result, result, shift);
  __ Csel(result, result, kMinInt / divisor, vc);
}
3982 | 3993 |
3983 | 3994 |
3984 void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) { | 3995 void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) { |
3985 Register dividend = ToRegister32(instr->dividend()); | 3996 Register dividend = ToRegister32(instr->dividend()); |
3986 int32_t divisor = instr->divisor(); | 3997 int32_t divisor = instr->divisor(); |
3987 Register result = ToRegister32(instr->result()); | 3998 Register result = ToRegister32(instr->result()); |
3988 DCHECK(!AreAliased(dividend, result)); | 3999 DCHECK(!AreAliased(dividend, result)); |
3989 | 4000 |
3990 if (divisor == 0) { | 4001 if (divisor == 0) { |
3991 Deoptimize(instr, "division by zero"); | 4002 Deoptimize(instr, Deoptimizer::kDivisionByZero); |
3992 return; | 4003 return; |
3993 } | 4004 } |
3994 | 4005 |
3995 // Check for (0 / -x) that will produce negative zero. | 4006 // Check for (0 / -x) that will produce negative zero. |
3996 HMathFloorOfDiv* hdiv = instr->hydrogen(); | 4007 HMathFloorOfDiv* hdiv = instr->hydrogen(); |
3997 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { | 4008 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { |
3998 DeoptimizeIfZero(dividend, instr, "minus zero"); | 4009 DeoptimizeIfZero(dividend, instr, Deoptimizer::kMinusZero); |
3999 } | 4010 } |
4000 | 4011 |
4001 // Easy case: We need no dynamic check for the dividend and the flooring | 4012 // Easy case: We need no dynamic check for the dividend and the flooring |
4002 // division is the same as the truncating division. | 4013 // division is the same as the truncating division. |
4003 if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) || | 4014 if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) || |
4004 (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) { | 4015 (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) { |
4005 __ TruncatingDiv(result, dividend, Abs(divisor)); | 4016 __ TruncatingDiv(result, dividend, Abs(divisor)); |
4006 if (divisor < 0) __ Neg(result, result); | 4017 if (divisor < 0) __ Neg(result, result); |
4007 return; | 4018 return; |
4008 } | 4019 } |
(...skipping 22 matching lines...) Expand all Loading... |
4031 Register dividend = ToRegister32(instr->dividend()); | 4042 Register dividend = ToRegister32(instr->dividend()); |
4032 Register divisor = ToRegister32(instr->divisor()); | 4043 Register divisor = ToRegister32(instr->divisor()); |
4033 Register remainder = ToRegister32(instr->temp()); | 4044 Register remainder = ToRegister32(instr->temp()); |
4034 Register result = ToRegister32(instr->result()); | 4045 Register result = ToRegister32(instr->result()); |
4035 | 4046 |
4036 // This can't cause an exception on ARM, so we can speculatively | 4047 // This can't cause an exception on ARM, so we can speculatively |
4037 // execute it already now. | 4048 // execute it already now. |
4038 __ Sdiv(result, dividend, divisor); | 4049 __ Sdiv(result, dividend, divisor); |
4039 | 4050 |
4040 // Check for x / 0. | 4051 // Check for x / 0. |
4041 DeoptimizeIfZero(divisor, instr, "division by zero"); | 4052 DeoptimizeIfZero(divisor, instr, Deoptimizer::kDivisionByZero); |
4042 | 4053 |
4043 // Check for (kMinInt / -1). | 4054 // Check for (kMinInt / -1). |
4044 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) { | 4055 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) { |
4045 // The V flag will be set iff dividend == kMinInt. | 4056 // The V flag will be set iff dividend == kMinInt. |
4046 __ Cmp(dividend, 1); | 4057 __ Cmp(dividend, 1); |
4047 __ Ccmp(divisor, -1, NoFlag, vs); | 4058 __ Ccmp(divisor, -1, NoFlag, vs); |
4048 DeoptimizeIf(eq, instr, "overflow"); | 4059 DeoptimizeIf(eq, instr, Deoptimizer::kOverflow); |
4049 } | 4060 } |
4050 | 4061 |
4051 // Check for (0 / -x) that will produce negative zero. | 4062 // Check for (0 / -x) that will produce negative zero. |
4052 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 4063 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
4053 __ Cmp(divisor, 0); | 4064 __ Cmp(divisor, 0); |
4054 __ Ccmp(dividend, 0, ZFlag, mi); | 4065 __ Ccmp(dividend, 0, ZFlag, mi); |
4055 // "divisor" can't be null because the code would have already been | 4066 // "divisor" can't be null because the code would have already been |
4056 // deoptimized. The Z flag is set only if (divisor < 0) and (dividend == 0). | 4067 // deoptimized. The Z flag is set only if (divisor < 0) and (dividend == 0). |
4057 // In this case we need to deoptimize to produce a -0. | 4068 // In this case we need to deoptimize to produce a -0. |
4058 DeoptimizeIf(eq, instr, "minus zero"); | 4069 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero); |
4059 } | 4070 } |
4060 | 4071 |
4061 Label done; | 4072 Label done; |
4062 // If both operands have the same sign then we are done. | 4073 // If both operands have the same sign then we are done. |
4063 __ Eor(remainder, dividend, divisor); | 4074 __ Eor(remainder, dividend, divisor); |
4064 __ Tbz(remainder, kWSignBit, &done); | 4075 __ Tbz(remainder, kWSignBit, &done); |
4065 | 4076 |
4066 // Check if the result needs to be corrected. | 4077 // Check if the result needs to be corrected. |
4067 __ Msub(remainder, result, divisor, dividend); | 4078 __ Msub(remainder, result, divisor, dividend); |
4068 __ Cbz(remainder, &done); | 4079 __ Cbz(remainder, &done); |
(...skipping 138 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
4207 // result fits in 32 bits. | 4218 // result fits in 32 bits. |
4208 __ Cmp(result, Operand(result.W(), SXTW)); | 4219 __ Cmp(result, Operand(result.W(), SXTW)); |
4209 __ Ccmp(result, 1, ZFlag, eq); | 4220 __ Ccmp(result, 1, ZFlag, eq); |
4210 __ B(hi, &done); | 4221 __ B(hi, &done); |
4211 | 4222 |
4212 // At this point, we have to handle possible inputs of NaN or numbers in the | 4223 // At this point, we have to handle possible inputs of NaN or numbers in the |
4213 // range [-0.5, 1.5[, or numbers larger than 32 bits. | 4224 // range [-0.5, 1.5[, or numbers larger than 32 bits. |
4214 | 4225 |
4215 // Deoptimize if the result > 1, as it must be larger than 32 bits. | 4226 // Deoptimize if the result > 1, as it must be larger than 32 bits. |
4216 __ Cmp(result, 1); | 4227 __ Cmp(result, 1); |
4217 DeoptimizeIf(hi, instr, "overflow"); | 4228 DeoptimizeIf(hi, instr, Deoptimizer::kOverflow); |
4218 | 4229 |
4219 // Deoptimize for negative inputs, which at this point are only numbers in | 4230 // Deoptimize for negative inputs, which at this point are only numbers in |
4220 // the range [-0.5, -0.0] | 4231 // the range [-0.5, -0.0] |
4221 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 4232 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
4222 __ Fmov(result, input); | 4233 __ Fmov(result, input); |
4223 DeoptimizeIfNegative(result, instr, "minus zero"); | 4234 DeoptimizeIfNegative(result, instr, Deoptimizer::kMinusZero); |
4224 } | 4235 } |
4225 | 4236 |
4226 // Deoptimize if the input was NaN. | 4237 // Deoptimize if the input was NaN. |
4227 __ Fcmp(input, dot_five); | 4238 __ Fcmp(input, dot_five); |
4228 DeoptimizeIf(vs, instr, "NaN"); | 4239 DeoptimizeIf(vs, instr, Deoptimizer::kNaN); |
4229 | 4240 |
4230 // Now, the only unhandled inputs are in the range [0.0, 1.5[ (or [-0.5, 1.5[ | 4241 // Now, the only unhandled inputs are in the range [0.0, 1.5[ (or [-0.5, 1.5[ |
4231 // if we didn't generate a -0.0 bailout). If input >= 0.5 then return 1, | 4242 // if we didn't generate a -0.0 bailout). If input >= 0.5 then return 1, |
4232 // else 0; we avoid dealing with 0.499...94 directly. | 4243 // else 0; we avoid dealing with 0.499...94 directly. |
4233 __ Cset(result, ge); | 4244 __ Cset(result, ge); |
4234 __ Bind(&done); | 4245 __ Bind(&done); |
4235 } | 4246 } |
4236 | 4247 |
4237 | 4248 |
4238 void LCodeGen::DoMathFround(LMathFround* instr) { | 4249 void LCodeGen::DoMathFround(LMathFround* instr) { |
(...skipping 57 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
4296 HMod* hmod = instr->hydrogen(); | 4307 HMod* hmod = instr->hydrogen(); |
4297 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1); | 4308 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1); |
4298 Label dividend_is_not_negative, done; | 4309 Label dividend_is_not_negative, done; |
4299 if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) { | 4310 if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) { |
4300 __ Tbz(dividend, kWSignBit, &dividend_is_not_negative); | 4311 __ Tbz(dividend, kWSignBit, &dividend_is_not_negative); |
4301 // Note that this is correct even for kMinInt operands. | 4312 // Note that this is correct even for kMinInt operands. |
4302 __ Neg(dividend, dividend); | 4313 __ Neg(dividend, dividend); |
4303 __ And(dividend, dividend, mask); | 4314 __ And(dividend, dividend, mask); |
4304 __ Negs(dividend, dividend); | 4315 __ Negs(dividend, dividend); |
4305 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { | 4316 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { |
4306 DeoptimizeIf(eq, instr, "minus zero"); | 4317 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero); |
4307 } | 4318 } |
4308 __ B(&done); | 4319 __ B(&done); |
4309 } | 4320 } |
4310 | 4321 |
4311 __ bind(&dividend_is_not_negative); | 4322 __ bind(&dividend_is_not_negative); |
4312 __ And(dividend, dividend, mask); | 4323 __ And(dividend, dividend, mask); |
4313 __ bind(&done); | 4324 __ bind(&done); |
4314 } | 4325 } |
4315 | 4326 |
4316 | 4327 |
4317 void LCodeGen::DoModByConstI(LModByConstI* instr) { | 4328 void LCodeGen::DoModByConstI(LModByConstI* instr) { |
4318 Register dividend = ToRegister32(instr->dividend()); | 4329 Register dividend = ToRegister32(instr->dividend()); |
4319 int32_t divisor = instr->divisor(); | 4330 int32_t divisor = instr->divisor(); |
4320 Register result = ToRegister32(instr->result()); | 4331 Register result = ToRegister32(instr->result()); |
4321 Register temp = ToRegister32(instr->temp()); | 4332 Register temp = ToRegister32(instr->temp()); |
4322 DCHECK(!AreAliased(dividend, result, temp)); | 4333 DCHECK(!AreAliased(dividend, result, temp)); |
4323 | 4334 |
4324 if (divisor == 0) { | 4335 if (divisor == 0) { |
4325 Deoptimize(instr, "division by zero"); | 4336 Deoptimize(instr, Deoptimizer::kDivisionByZero); |
4326 return; | 4337 return; |
4327 } | 4338 } |
4328 | 4339 |
4329 __ TruncatingDiv(result, dividend, Abs(divisor)); | 4340 __ TruncatingDiv(result, dividend, Abs(divisor)); |
4330 __ Sxtw(dividend.X(), dividend); | 4341 __ Sxtw(dividend.X(), dividend); |
4331 __ Mov(temp, Abs(divisor)); | 4342 __ Mov(temp, Abs(divisor)); |
4332 __ Smsubl(result.X(), result, temp, dividend.X()); | 4343 __ Smsubl(result.X(), result, temp, dividend.X()); |
4333 | 4344 |
4334 // Check for negative zero. | 4345 // Check for negative zero. |
4335 HMod* hmod = instr->hydrogen(); | 4346 HMod* hmod = instr->hydrogen(); |
4336 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { | 4347 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { |
4337 Label remainder_not_zero; | 4348 Label remainder_not_zero; |
4338 __ Cbnz(result, &remainder_not_zero); | 4349 __ Cbnz(result, &remainder_not_zero); |
4339 DeoptimizeIfNegative(dividend, instr, "minus zero"); | 4350 DeoptimizeIfNegative(dividend, instr, Deoptimizer::kMinusZero); |
4340 __ bind(&remainder_not_zero); | 4351 __ bind(&remainder_not_zero); |
4341 } | 4352 } |
4342 } | 4353 } |
4343 | 4354 |
4344 | 4355 |
4345 void LCodeGen::DoModI(LModI* instr) { | 4356 void LCodeGen::DoModI(LModI* instr) { |
4346 Register dividend = ToRegister32(instr->left()); | 4357 Register dividend = ToRegister32(instr->left()); |
4347 Register divisor = ToRegister32(instr->right()); | 4358 Register divisor = ToRegister32(instr->right()); |
4348 Register result = ToRegister32(instr->result()); | 4359 Register result = ToRegister32(instr->result()); |
4349 | 4360 |
4350 Label done; | 4361 Label done; |
4351 // modulo = dividend - quotient * divisor | 4362 // modulo = dividend - quotient * divisor |
4352 __ Sdiv(result, dividend, divisor); | 4363 __ Sdiv(result, dividend, divisor); |
4353 if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) { | 4364 if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) { |
4354 DeoptimizeIfZero(divisor, instr, "division by zero"); | 4365 DeoptimizeIfZero(divisor, instr, Deoptimizer::kDivisionByZero); |
4355 } | 4366 } |
4356 __ Msub(result, result, divisor, dividend); | 4367 __ Msub(result, result, divisor, dividend); |
4357 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 4368 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
4358 __ Cbnz(result, &done); | 4369 __ Cbnz(result, &done); |
4359 DeoptimizeIfNegative(dividend, instr, "minus zero"); | 4370 DeoptimizeIfNegative(dividend, instr, Deoptimizer::kMinusZero); |
4360 } | 4371 } |
4361 __ Bind(&done); | 4372 __ Bind(&done); |
4362 } | 4373 } |
4363 | 4374 |
4364 | 4375 |
4365 void LCodeGen::DoMulConstIS(LMulConstIS* instr) { | 4376 void LCodeGen::DoMulConstIS(LMulConstIS* instr) { |
4366 DCHECK(instr->hydrogen()->representation().IsSmiOrInteger32()); | 4377 DCHECK(instr->hydrogen()->representation().IsSmiOrInteger32()); |
4367 bool is_smi = instr->hydrogen()->representation().IsSmi(); | 4378 bool is_smi = instr->hydrogen()->representation().IsSmi(); |
4368 Register result = | 4379 Register result = |
4369 is_smi ? ToRegister(instr->result()) : ToRegister32(instr->result()); | 4380 is_smi ? ToRegister(instr->result()) : ToRegister32(instr->result()); |
4370 Register left = | 4381 Register left = |
4371 is_smi ? ToRegister(instr->left()) : ToRegister32(instr->left()) ; | 4382 is_smi ? ToRegister(instr->left()) : ToRegister32(instr->left()) ; |
4372 int32_t right = ToInteger32(instr->right()); | 4383 int32_t right = ToInteger32(instr->right()); |
4373 DCHECK((right > -kMaxInt) || (right < kMaxInt)); | 4384 DCHECK((right > -kMaxInt) || (right < kMaxInt)); |
4374 | 4385 |
4375 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); | 4386 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); |
4376 bool bailout_on_minus_zero = | 4387 bool bailout_on_minus_zero = |
4377 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero); | 4388 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero); |
4378 | 4389 |
4379 if (bailout_on_minus_zero) { | 4390 if (bailout_on_minus_zero) { |
4380 if (right < 0) { | 4391 if (right < 0) { |
4381 // The result is -0 if right is negative and left is zero. | 4392 // The result is -0 if right is negative and left is zero. |
4382 DeoptimizeIfZero(left, instr, "minus zero"); | 4393 DeoptimizeIfZero(left, instr, Deoptimizer::kMinusZero); |
4383 } else if (right == 0) { | 4394 } else if (right == 0) { |
4384 // The result is -0 if the right is zero and the left is negative. | 4395 // The result is -0 if the right is zero and the left is negative. |
4385 DeoptimizeIfNegative(left, instr, "minus zero"); | 4396 DeoptimizeIfNegative(left, instr, Deoptimizer::kMinusZero); |
4386 } | 4397 } |
4387 } | 4398 } |
4388 | 4399 |
4389 switch (right) { | 4400 switch (right) { |
4390 // Cases which can detect overflow. | 4401 // Cases which can detect overflow. |
4391 case -1: | 4402 case -1: |
4392 if (can_overflow) { | 4403 if (can_overflow) { |
4393 // Only 0x80000000 can overflow here. | 4404 // Only 0x80000000 can overflow here. |
4394 __ Negs(result, left); | 4405 __ Negs(result, left); |
4395 DeoptimizeIf(vs, instr, "overflow"); | 4406 DeoptimizeIf(vs, instr, Deoptimizer::kOverflow); |
4396 } else { | 4407 } else { |
4397 __ Neg(result, left); | 4408 __ Neg(result, left); |
4398 } | 4409 } |
4399 break; | 4410 break; |
4400 case 0: | 4411 case 0: |
4401 // This case can never overflow. | 4412 // This case can never overflow. |
4402 __ Mov(result, 0); | 4413 __ Mov(result, 0); |
4403 break; | 4414 break; |
4404 case 1: | 4415 case 1: |
4405 // This case can never overflow. | 4416 // This case can never overflow. |
4406 __ Mov(result, left, kDiscardForSameWReg); | 4417 __ Mov(result, left, kDiscardForSameWReg); |
4407 break; | 4418 break; |
4408 case 2: | 4419 case 2: |
4409 if (can_overflow) { | 4420 if (can_overflow) { |
4410 __ Adds(result, left, left); | 4421 __ Adds(result, left, left); |
4411 DeoptimizeIf(vs, instr, "overflow"); | 4422 DeoptimizeIf(vs, instr, Deoptimizer::kOverflow); |
4412 } else { | 4423 } else { |
4413 __ Add(result, left, left); | 4424 __ Add(result, left, left); |
4414 } | 4425 } |
4415 break; | 4426 break; |
4416 | 4427 |
4417 default: | 4428 default: |
4418 // Multiplication by constant powers of two (and some related values) | 4429 // Multiplication by constant powers of two (and some related values) |
4419 // can be done efficiently with shifted operands. | 4430 // can be done efficiently with shifted operands. |
4420 int32_t right_abs = Abs(right); | 4431 int32_t right_abs = Abs(right); |
4421 | 4432 |
4422 if (base::bits::IsPowerOfTwo32(right_abs)) { | 4433 if (base::bits::IsPowerOfTwo32(right_abs)) { |
4423 int right_log2 = WhichPowerOf2(right_abs); | 4434 int right_log2 = WhichPowerOf2(right_abs); |
4424 | 4435 |
4425 if (can_overflow) { | 4436 if (can_overflow) { |
4426 Register scratch = result; | 4437 Register scratch = result; |
4427 DCHECK(!AreAliased(scratch, left)); | 4438 DCHECK(!AreAliased(scratch, left)); |
4428 __ Cls(scratch, left); | 4439 __ Cls(scratch, left); |
4429 __ Cmp(scratch, right_log2); | 4440 __ Cmp(scratch, right_log2); |
4430 DeoptimizeIf(lt, instr, "overflow"); | 4441 DeoptimizeIf(lt, instr, Deoptimizer::kOverflow); |
4431 } | 4442 } |
4432 | 4443 |
4433 if (right >= 0) { | 4444 if (right >= 0) { |
4434 // result = left << log2(right) | 4445 // result = left << log2(right) |
4435 __ Lsl(result, left, right_log2); | 4446 __ Lsl(result, left, right_log2); |
4436 } else { | 4447 } else { |
4437 // result = -left << log2(-right) | 4448 // result = -left << log2(-right) |
4438 if (can_overflow) { | 4449 if (can_overflow) { |
4439 __ Negs(result, Operand(left, LSL, right_log2)); | 4450 __ Negs(result, Operand(left, LSL, right_log2)); |
4440 DeoptimizeIf(vs, instr, "overflow"); | 4451 DeoptimizeIf(vs, instr, Deoptimizer::kOverflow); |
4441 } else { | 4452 } else { |
4442 __ Neg(result, Operand(left, LSL, right_log2)); | 4453 __ Neg(result, Operand(left, LSL, right_log2)); |
4443 } | 4454 } |
4444 } | 4455 } |
4445 return; | 4456 return; |
4446 } | 4457 } |
4447 | 4458 |
4448 | 4459 |
4449 // For the following cases, we could perform a conservative overflow check | 4460 // For the following cases, we could perform a conservative overflow check |
4450 // with CLS as above. However the few cycles saved are likely not worth | 4461 // with CLS as above. However the few cycles saved are likely not worth |
(...skipping 37 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
4488 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero); | 4499 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero); |
4489 | 4500 |
4490 if (bailout_on_minus_zero && !left.Is(right)) { | 4501 if (bailout_on_minus_zero && !left.Is(right)) { |
4491 // If one operand is zero and the other is negative, the result is -0. | 4502 // If one operand is zero and the other is negative, the result is -0. |
4492 // - Set Z (eq) if either left or right, or both, are 0. | 4503 // - Set Z (eq) if either left or right, or both, are 0. |
4493 __ Cmp(left, 0); | 4504 __ Cmp(left, 0); |
4494 __ Ccmp(right, 0, ZFlag, ne); | 4505 __ Ccmp(right, 0, ZFlag, ne); |
4495 // - If so (eq), set N (mi) if left + right is negative. | 4506 // - If so (eq), set N (mi) if left + right is negative. |
4496 // - Otherwise, clear N. | 4507 // - Otherwise, clear N. |
4497 __ Ccmn(left, right, NoFlag, eq); | 4508 __ Ccmn(left, right, NoFlag, eq); |
4498 DeoptimizeIf(mi, instr, "minus zero"); | 4509 DeoptimizeIf(mi, instr, Deoptimizer::kMinusZero); |
4499 } | 4510 } |
4500 | 4511 |
4501 if (can_overflow) { | 4512 if (can_overflow) { |
4502 __ Smull(result.X(), left, right); | 4513 __ Smull(result.X(), left, right); |
4503 __ Cmp(result.X(), Operand(result, SXTW)); | 4514 __ Cmp(result.X(), Operand(result, SXTW)); |
4504 DeoptimizeIf(ne, instr, "overflow"); | 4515 DeoptimizeIf(ne, instr, Deoptimizer::kOverflow); |
4505 } else { | 4516 } else { |
4506 __ Mul(result, left, right); | 4517 __ Mul(result, left, right); |
4507 } | 4518 } |
4508 } | 4519 } |
4509 | 4520 |
4510 | 4521 |
4511 void LCodeGen::DoMulS(LMulS* instr) { | 4522 void LCodeGen::DoMulS(LMulS* instr) { |
4512 Register result = ToRegister(instr->result()); | 4523 Register result = ToRegister(instr->result()); |
4513 Register left = ToRegister(instr->left()); | 4524 Register left = ToRegister(instr->left()); |
4514 Register right = ToRegister(instr->right()); | 4525 Register right = ToRegister(instr->right()); |
4515 | 4526 |
4516 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); | 4527 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); |
4517 bool bailout_on_minus_zero = | 4528 bool bailout_on_minus_zero = |
4518 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero); | 4529 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero); |
4519 | 4530 |
4520 if (bailout_on_minus_zero && !left.Is(right)) { | 4531 if (bailout_on_minus_zero && !left.Is(right)) { |
4521 // If one operand is zero and the other is negative, the result is -0. | 4532 // If one operand is zero and the other is negative, the result is -0. |
4522 // - Set Z (eq) if either left or right, or both, are 0. | 4533 // - Set Z (eq) if either left or right, or both, are 0. |
4523 __ Cmp(left, 0); | 4534 __ Cmp(left, 0); |
4524 __ Ccmp(right, 0, ZFlag, ne); | 4535 __ Ccmp(right, 0, ZFlag, ne); |
4525 // - If so (eq), set N (mi) if left + right is negative. | 4536 // - If so (eq), set N (mi) if left + right is negative. |
4526 // - Otherwise, clear N. | 4537 // - Otherwise, clear N. |
4527 __ Ccmn(left, right, NoFlag, eq); | 4538 __ Ccmn(left, right, NoFlag, eq); |
4528 DeoptimizeIf(mi, instr, "minus zero"); | 4539 DeoptimizeIf(mi, instr, Deoptimizer::kMinusZero); |
4529 } | 4540 } |
4530 | 4541 |
4531 STATIC_ASSERT((kSmiShift == 32) && (kSmiTag == 0)); | 4542 STATIC_ASSERT((kSmiShift == 32) && (kSmiTag == 0)); |
4532 if (can_overflow) { | 4543 if (can_overflow) { |
4533 __ Smulh(result, left, right); | 4544 __ Smulh(result, left, right); |
4534 __ Cmp(result, Operand(result.W(), SXTW)); | 4545 __ Cmp(result, Operand(result.W(), SXTW)); |
4535 __ SmiTag(result); | 4546 __ SmiTag(result); |
4536 DeoptimizeIf(ne, instr, "overflow"); | 4547 DeoptimizeIf(ne, instr, Deoptimizer::kOverflow); |
4537 } else { | 4548 } else { |
4538 if (AreAliased(result, left, right)) { | 4549 if (AreAliased(result, left, right)) { |
4539 // All three registers are the same: half untag the input and then | 4550 // All three registers are the same: half untag the input and then |
4540 // multiply, giving a tagged result. | 4551 // multiply, giving a tagged result. |
4541 STATIC_ASSERT((kSmiShift % 2) == 0); | 4552 STATIC_ASSERT((kSmiShift % 2) == 0); |
4542 __ Asr(result, left, kSmiShift / 2); | 4553 __ Asr(result, left, kSmiShift / 2); |
4543 __ Mul(result, result, result); | 4554 __ Mul(result, result, result); |
4544 } else if (result.Is(left) && !left.Is(right)) { | 4555 } else if (result.Is(left) && !left.Is(right)) { |
4545 // Registers result and left alias, right is distinct: untag left into | 4556 // Registers result and left alias, right is distinct: untag left into |
4546 // result, and then multiply by right, giving a tagged result. | 4557 // result, and then multiply by right, giving a tagged result. |
(...skipping 155 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
4702 // Heap number map check. | 4713 // Heap number map check. |
4703 if (can_convert_undefined_to_nan) { | 4714 if (can_convert_undefined_to_nan) { |
4704 __ JumpIfNotHeapNumber(input, &convert_undefined); | 4715 __ JumpIfNotHeapNumber(input, &convert_undefined); |
4705 } else { | 4716 } else { |
4706 DeoptimizeIfNotHeapNumber(input, instr); | 4717 DeoptimizeIfNotHeapNumber(input, instr); |
4707 } | 4718 } |
4708 | 4719 |
4709 // Load heap number. | 4720 // Load heap number. |
4710 __ Ldr(result, FieldMemOperand(input, HeapNumber::kValueOffset)); | 4721 __ Ldr(result, FieldMemOperand(input, HeapNumber::kValueOffset)); |
4711 if (instr->hydrogen()->deoptimize_on_minus_zero()) { | 4722 if (instr->hydrogen()->deoptimize_on_minus_zero()) { |
4712 DeoptimizeIfMinusZero(result, instr, "minus zero"); | 4723 DeoptimizeIfMinusZero(result, instr, Deoptimizer::kMinusZero); |
4713 } | 4724 } |
4714 __ B(&done); | 4725 __ B(&done); |
4715 | 4726 |
4716 if (can_convert_undefined_to_nan) { | 4727 if (can_convert_undefined_to_nan) { |
4717 __ Bind(&convert_undefined); | 4728 __ Bind(&convert_undefined); |
4718 DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr, | 4729 DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr, |
4719 "not a heap number/undefined"); | 4730 Deoptimizer::kNotAHeapNumberUndefined); |
4720 | 4731 |
4721 __ LoadRoot(scratch, Heap::kNanValueRootIndex); | 4732 __ LoadRoot(scratch, Heap::kNanValueRootIndex); |
4722 __ Ldr(result, FieldMemOperand(scratch, HeapNumber::kValueOffset)); | 4733 __ Ldr(result, FieldMemOperand(scratch, HeapNumber::kValueOffset)); |
4723 __ B(&done); | 4734 __ B(&done); |
4724 } | 4735 } |
4725 | 4736 |
4726 } else { | 4737 } else { |
4727 DCHECK(mode == NUMBER_CANDIDATE_IS_SMI); | 4738 DCHECK(mode == NUMBER_CANDIDATE_IS_SMI); |
4728 // Fall through to load_smi. | 4739 // Fall through to load_smi. |
4729 } | 4740 } |
(...skipping 173 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
4903 } | 4914 } |
4904 } | 4915 } |
4905 | 4916 |
4906 | 4917 |
4907 void LCodeGen::DoSmiTag(LSmiTag* instr) { | 4918 void LCodeGen::DoSmiTag(LSmiTag* instr) { |
4908 HChange* hchange = instr->hydrogen(); | 4919 HChange* hchange = instr->hydrogen(); |
4909 Register input = ToRegister(instr->value()); | 4920 Register input = ToRegister(instr->value()); |
4910 Register output = ToRegister(instr->result()); | 4921 Register output = ToRegister(instr->result()); |
4911 if (hchange->CheckFlag(HValue::kCanOverflow) && | 4922 if (hchange->CheckFlag(HValue::kCanOverflow) && |
4912 hchange->value()->CheckFlag(HValue::kUint32)) { | 4923 hchange->value()->CheckFlag(HValue::kUint32)) { |
4913 DeoptimizeIfNegative(input.W(), instr, "overflow"); | 4924 DeoptimizeIfNegative(input.W(), instr, Deoptimizer::kOverflow); |
4914 } | 4925 } |
4915 __ SmiTag(output, input); | 4926 __ SmiTag(output, input); |
4916 } | 4927 } |
4917 | 4928 |
4918 | 4929 |
4919 void LCodeGen::DoSmiUntag(LSmiUntag* instr) { | 4930 void LCodeGen::DoSmiUntag(LSmiUntag* instr) { |
4920 Register input = ToRegister(instr->value()); | 4931 Register input = ToRegister(instr->value()); |
4921 Register result = ToRegister(instr->result()); | 4932 Register result = ToRegister(instr->result()); |
4922 Label done, untag; | 4933 Label done, untag; |
4923 | 4934 |
4924 if (instr->needs_check()) { | 4935 if (instr->needs_check()) { |
4925 DeoptimizeIfNotSmi(input, instr, "not a Smi"); | 4936 DeoptimizeIfNotSmi(input, instr, Deoptimizer::kNotASmi); |
4926 } | 4937 } |
4927 | 4938 |
4928 __ Bind(&untag); | 4939 __ Bind(&untag); |
4929 __ SmiUntag(result, input); | 4940 __ SmiUntag(result, input); |
4930 __ Bind(&done); | 4941 __ Bind(&done); |
4931 } | 4942 } |
4932 | 4943 |
4933 | 4944 |
4934 void LCodeGen::DoShiftI(LShiftI* instr) { | 4945 void LCodeGen::DoShiftI(LShiftI* instr) { |
4935 LOperand* right_op = instr->right(); | 4946 LOperand* right_op = instr->right(); |
4936 Register left = ToRegister32(instr->left()); | 4947 Register left = ToRegister32(instr->left()); |
4937 Register result = ToRegister32(instr->result()); | 4948 Register result = ToRegister32(instr->result()); |
4938 | 4949 |
4939 if (right_op->IsRegister()) { | 4950 if (right_op->IsRegister()) { |
4940 Register right = ToRegister32(instr->right()); | 4951 Register right = ToRegister32(instr->right()); |
4941 switch (instr->op()) { | 4952 switch (instr->op()) { |
4942 case Token::ROR: __ Ror(result, left, right); break; | 4953 case Token::ROR: __ Ror(result, left, right); break; |
4943 case Token::SAR: __ Asr(result, left, right); break; | 4954 case Token::SAR: __ Asr(result, left, right); break; |
4944 case Token::SHL: __ Lsl(result, left, right); break; | 4955 case Token::SHL: __ Lsl(result, left, right); break; |
4945 case Token::SHR: | 4956 case Token::SHR: |
4946 __ Lsr(result, left, right); | 4957 __ Lsr(result, left, right); |
4947 if (instr->can_deopt()) { | 4958 if (instr->can_deopt()) { |
4948 // If `left >>> right` >= 0x80000000, the result is not representable | 4959 // If `left >>> right` >= 0x80000000, the result is not representable |
4949 // in a signed 32-bit smi. | 4960 // in a signed 32-bit smi. |
4950 DeoptimizeIfNegative(result, instr, "negative value"); | 4961 DeoptimizeIfNegative(result, instr, Deoptimizer::kNegativeValue); |
4951 } | 4962 } |
4952 break; | 4963 break; |
4953 default: UNREACHABLE(); | 4964 default: UNREACHABLE(); |
4954 } | 4965 } |
4955 } else { | 4966 } else { |
4956 DCHECK(right_op->IsConstantOperand()); | 4967 DCHECK(right_op->IsConstantOperand()); |
4957 int shift_count = JSShiftAmountFromLConstant(right_op); | 4968 int shift_count = JSShiftAmountFromLConstant(right_op); |
4958 if (shift_count == 0) { | 4969 if (shift_count == 0) { |
4959 if ((instr->op() == Token::SHR) && instr->can_deopt()) { | 4970 if ((instr->op() == Token::SHR) && instr->can_deopt()) { |
4960 DeoptimizeIfNegative(left, instr, "negative value"); | 4971 DeoptimizeIfNegative(left, instr, Deoptimizer::kNegativeValue); |
4961 } | 4972 } |
4962 __ Mov(result, left, kDiscardForSameWReg); | 4973 __ Mov(result, left, kDiscardForSameWReg); |
4963 } else { | 4974 } else { |
4964 switch (instr->op()) { | 4975 switch (instr->op()) { |
4965 case Token::ROR: __ Ror(result, left, shift_count); break; | 4976 case Token::ROR: __ Ror(result, left, shift_count); break; |
4966 case Token::SAR: __ Asr(result, left, shift_count); break; | 4977 case Token::SAR: __ Asr(result, left, shift_count); break; |
4967 case Token::SHL: __ Lsl(result, left, shift_count); break; | 4978 case Token::SHL: __ Lsl(result, left, shift_count); break; |
4968 case Token::SHR: __ Lsr(result, left, shift_count); break; | 4979 case Token::SHR: __ Lsr(result, left, shift_count); break; |
4969 default: UNREACHABLE(); | 4980 default: UNREACHABLE(); |
4970 } | 4981 } |
(...skipping 32 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
5003 break; | 5014 break; |
5004 case Token::SHL: | 5015 case Token::SHL: |
5005 __ Lsl(result, left, result); | 5016 __ Lsl(result, left, result); |
5006 break; | 5017 break; |
5007 case Token::SHR: | 5018 case Token::SHR: |
5008 __ Lsr(result, left, result); | 5019 __ Lsr(result, left, result); |
5009 __ Bic(result, result, kSmiShiftMask); | 5020 __ Bic(result, result, kSmiShiftMask); |
5010 if (instr->can_deopt()) { | 5021 if (instr->can_deopt()) { |
5011 // If `left >>> right` >= 0x80000000, the result is not representable | 5022 // If `left >>> right` >= 0x80000000, the result is not representable |
5012 // in a signed 32-bit smi. | 5023 // in a signed 32-bit smi. |
5013 DeoptimizeIfNegative(result, instr, "negative value"); | 5024 DeoptimizeIfNegative(result, instr, Deoptimizer::kNegativeValue); |
5014 } | 5025 } |
5015 break; | 5026 break; |
5016 default: UNREACHABLE(); | 5027 default: UNREACHABLE(); |
5017 } | 5028 } |
5018 } else { | 5029 } else { |
5019 DCHECK(right_op->IsConstantOperand()); | 5030 DCHECK(right_op->IsConstantOperand()); |
5020 int shift_count = JSShiftAmountFromLConstant(right_op); | 5031 int shift_count = JSShiftAmountFromLConstant(right_op); |
5021 if (shift_count == 0) { | 5032 if (shift_count == 0) { |
5022 if ((instr->op() == Token::SHR) && instr->can_deopt()) { | 5033 if ((instr->op() == Token::SHR) && instr->can_deopt()) { |
5023 DeoptimizeIfNegative(left, instr, "negative value"); | 5034 DeoptimizeIfNegative(left, instr, Deoptimizer::kNegativeValue); |
5024 } | 5035 } |
5025 __ Mov(result, left); | 5036 __ Mov(result, left); |
5026 } else { | 5037 } else { |
5027 switch (instr->op()) { | 5038 switch (instr->op()) { |
5028 case Token::ROR: | 5039 case Token::ROR: |
5029 __ SmiUntag(result, left); | 5040 __ SmiUntag(result, left); |
5030 __ Ror(result.W(), result.W(), shift_count); | 5041 __ Ror(result.W(), result.W(), shift_count); |
5031 __ SmiTag(result); | 5042 __ SmiTag(result); |
5032 break; | 5043 break; |
5033 case Token::SAR: | 5044 case Token::SAR: |
(...skipping 107 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
5141 Register context = ToRegister(instr->context()); | 5152 Register context = ToRegister(instr->context()); |
5142 Register value = ToRegister(instr->value()); | 5153 Register value = ToRegister(instr->value()); |
5143 Register scratch = ToRegister(instr->temp()); | 5154 Register scratch = ToRegister(instr->temp()); |
5144 MemOperand target = ContextMemOperand(context, instr->slot_index()); | 5155 MemOperand target = ContextMemOperand(context, instr->slot_index()); |
5145 | 5156 |
5146 Label skip_assignment; | 5157 Label skip_assignment; |
5147 | 5158 |
5148 if (instr->hydrogen()->RequiresHoleCheck()) { | 5159 if (instr->hydrogen()->RequiresHoleCheck()) { |
5149 __ Ldr(scratch, target); | 5160 __ Ldr(scratch, target); |
5150 if (instr->hydrogen()->DeoptimizesOnHole()) { | 5161 if (instr->hydrogen()->DeoptimizesOnHole()) { |
5151 DeoptimizeIfRoot(scratch, Heap::kTheHoleValueRootIndex, instr, "hole"); | 5162 DeoptimizeIfRoot(scratch, Heap::kTheHoleValueRootIndex, instr, |
| 5163 Deoptimizer::kHole); |
5152 } else { | 5164 } else { |
5153 __ JumpIfNotRoot(scratch, Heap::kTheHoleValueRootIndex, &skip_assignment); | 5165 __ JumpIfNotRoot(scratch, Heap::kTheHoleValueRootIndex, &skip_assignment); |
5154 } | 5166 } |
5155 } | 5167 } |
5156 | 5168 |
5157 __ Str(value, target); | 5169 __ Str(value, target); |
5158 if (instr->hydrogen()->NeedsWriteBarrier()) { | 5170 if (instr->hydrogen()->NeedsWriteBarrier()) { |
5159 SmiCheck check_needed = | 5171 SmiCheck check_needed = |
5160 instr->hydrogen()->value()->type().IsHeapObject() | 5172 instr->hydrogen()->value()->type().IsHeapObject() |
5161 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; | 5173 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; |
(...skipping 17 matching lines...) Expand all Loading... |
5179 // Load the cell. | 5191 // Load the cell. |
5180 __ Mov(cell, Operand(instr->hydrogen()->cell().handle())); | 5192 __ Mov(cell, Operand(instr->hydrogen()->cell().handle())); |
5181 | 5193 |
5182 // If the cell we are storing to contains the hole it could have | 5194 // If the cell we are storing to contains the hole it could have |
5183 // been deleted from the property dictionary. In that case, we need | 5195 // been deleted from the property dictionary. In that case, we need |
5184 // to update the property details in the property dictionary to mark | 5196 // to update the property details in the property dictionary to mark |
5185 // it as no longer deleted. We deoptimize in that case. | 5197 // it as no longer deleted. We deoptimize in that case. |
5186 if (instr->hydrogen()->RequiresHoleCheck()) { | 5198 if (instr->hydrogen()->RequiresHoleCheck()) { |
5187 Register payload = ToRegister(instr->temp2()); | 5199 Register payload = ToRegister(instr->temp2()); |
5188 __ Ldr(payload, FieldMemOperand(cell, Cell::kValueOffset)); | 5200 __ Ldr(payload, FieldMemOperand(cell, Cell::kValueOffset)); |
5189 DeoptimizeIfRoot(payload, Heap::kTheHoleValueRootIndex, instr, "hole"); | 5201 DeoptimizeIfRoot(payload, Heap::kTheHoleValueRootIndex, instr, |
| 5202 Deoptimizer::kHole); |
5190 } | 5203 } |
5191 | 5204 |
5192 // Store the value. | 5205 // Store the value. |
5193 __ Str(value, FieldMemOperand(cell, Cell::kValueOffset)); | 5206 __ Str(value, FieldMemOperand(cell, Cell::kValueOffset)); |
5194 // Cells are always rescanned, so no write barrier here. | 5207 // Cells are always rescanned, so no write barrier here. |
5195 } | 5208 } |
5196 | 5209 |
5197 | 5210 |
5198 void LCodeGen::DoStoreKeyedExternal(LStoreKeyedExternal* instr) { | 5211 void LCodeGen::DoStoreKeyedExternal(LStoreKeyedExternal* instr) { |
5199 Register ext_ptr = ToRegister(instr->elements()); | 5212 Register ext_ptr = ToRegister(instr->elements()); |
(...skipping 401 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
5601 | 5614 |
5602 | 5615 |
5603 void LCodeGen::DoSubI(LSubI* instr) { | 5616 void LCodeGen::DoSubI(LSubI* instr) { |
5604 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); | 5617 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); |
5605 Register result = ToRegister32(instr->result()); | 5618 Register result = ToRegister32(instr->result()); |
5606 Register left = ToRegister32(instr->left()); | 5619 Register left = ToRegister32(instr->left()); |
5607 Operand right = ToShiftedRightOperand32(instr->right(), instr); | 5620 Operand right = ToShiftedRightOperand32(instr->right(), instr); |
5608 | 5621 |
5609 if (can_overflow) { | 5622 if (can_overflow) { |
5610 __ Subs(result, left, right); | 5623 __ Subs(result, left, right); |
5611 DeoptimizeIf(vs, instr, "overflow"); | 5624 DeoptimizeIf(vs, instr, Deoptimizer::kOverflow); |
5612 } else { | 5625 } else { |
5613 __ Sub(result, left, right); | 5626 __ Sub(result, left, right); |
5614 } | 5627 } |
5615 } | 5628 } |
5616 | 5629 |
5617 | 5630 |
5618 void LCodeGen::DoSubS(LSubS* instr) { | 5631 void LCodeGen::DoSubS(LSubS* instr) { |
5619 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); | 5632 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); |
5620 Register result = ToRegister(instr->result()); | 5633 Register result = ToRegister(instr->result()); |
5621 Register left = ToRegister(instr->left()); | 5634 Register left = ToRegister(instr->left()); |
5622 Operand right = ToOperand(instr->right()); | 5635 Operand right = ToOperand(instr->right()); |
5623 if (can_overflow) { | 5636 if (can_overflow) { |
5624 __ Subs(result, left, right); | 5637 __ Subs(result, left, right); |
5625 DeoptimizeIf(vs, instr, "overflow"); | 5638 DeoptimizeIf(vs, instr, Deoptimizer::kOverflow); |
5626 } else { | 5639 } else { |
5627 __ Sub(result, left, right); | 5640 __ Sub(result, left, right); |
5628 } | 5641 } |
5629 } | 5642 } |
5630 | 5643 |
5631 | 5644 |
5632 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, | 5645 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, |
5633 LOperand* value, | 5646 LOperand* value, |
5634 LOperand* temp1, | 5647 LOperand* temp1, |
5635 LOperand* temp2) { | 5648 LOperand* temp2) { |
(...skipping 20 matching lines...) Expand all Loading... |
5656 Register false_root = scratch1; | 5669 Register false_root = scratch1; |
5657 __ LoadTrueFalseRoots(true_root, false_root); | 5670 __ LoadTrueFalseRoots(true_root, false_root); |
5658 __ Cmp(input, true_root); | 5671 __ Cmp(input, true_root); |
5659 __ Cset(output, eq); | 5672 __ Cset(output, eq); |
5660 __ Ccmp(input, false_root, ZFlag, ne); | 5673 __ Ccmp(input, false_root, ZFlag, ne); |
5661 __ B(eq, &done); | 5674 __ B(eq, &done); |
5662 | 5675 |
5663 // Output contains zero, undefined is converted to zero for truncating | 5676 // Output contains zero, undefined is converted to zero for truncating |
5664 // conversions. | 5677 // conversions. |
5665 DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr, | 5678 DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr, |
5666 "not a heap number/undefined/true/false"); | 5679 Deoptimizer::kNotAHeapNumberUndefinedBoolean); |
5667 } else { | 5680 } else { |
5668 Register output = ToRegister32(instr->result()); | 5681 Register output = ToRegister32(instr->result()); |
5669 DoubleRegister dbl_scratch2 = ToDoubleRegister(temp2); | 5682 DoubleRegister dbl_scratch2 = ToDoubleRegister(temp2); |
5670 | 5683 |
5671 DeoptimizeIfNotHeapNumber(input, instr); | 5684 DeoptimizeIfNotHeapNumber(input, instr); |
5672 | 5685 |
5673 // A heap number: load value and convert to int32 using non-truncating | 5686 // A heap number: load value and convert to int32 using non-truncating |
5674 // function. If the result is out of range, branch to deoptimize. | 5687 // function. If the result is out of range, branch to deoptimize. |
5675 __ Ldr(dbl_scratch1, FieldMemOperand(input, HeapNumber::kValueOffset)); | 5688 __ Ldr(dbl_scratch1, FieldMemOperand(input, HeapNumber::kValueOffset)); |
5676 __ TryRepresentDoubleAsInt32(output, dbl_scratch1, dbl_scratch2); | 5689 __ TryRepresentDoubleAsInt32(output, dbl_scratch1, dbl_scratch2); |
5677 DeoptimizeIf(ne, instr, "lost precision or NaN"); | 5690 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN); |
5678 | 5691 |
5679 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 5692 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
5680 __ Cmp(output, 0); | 5693 __ Cmp(output, 0); |
5681 __ B(ne, &done); | 5694 __ B(ne, &done); |
5682 __ Fmov(scratch1, dbl_scratch1); | 5695 __ Fmov(scratch1, dbl_scratch1); |
5683 DeoptimizeIfNegative(scratch1, instr, "minus zero"); | 5696 DeoptimizeIfNegative(scratch1, instr, Deoptimizer::kMinusZero); |
5684 } | 5697 } |
5685 } | 5698 } |
5686 __ Bind(&done); | 5699 __ Bind(&done); |
5687 } | 5700 } |
5688 | 5701 |
5689 | 5702 |
5690 void LCodeGen::DoTaggedToI(LTaggedToI* instr) { | 5703 void LCodeGen::DoTaggedToI(LTaggedToI* instr) { |
5691 class DeferredTaggedToI: public LDeferredCode { | 5704 class DeferredTaggedToI: public LDeferredCode { |
5692 public: | 5705 public: |
5693 DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr) | 5706 DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr) |
(...skipping 120 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
5814 } | 5827 } |
5815 | 5828 |
5816 | 5829 |
5817 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) { | 5830 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) { |
5818 Register object = ToRegister(instr->object()); | 5831 Register object = ToRegister(instr->object()); |
5819 Register temp1 = ToRegister(instr->temp1()); | 5832 Register temp1 = ToRegister(instr->temp1()); |
5820 Register temp2 = ToRegister(instr->temp2()); | 5833 Register temp2 = ToRegister(instr->temp2()); |
5821 | 5834 |
5822 Label no_memento_found; | 5835 Label no_memento_found; |
5823 __ TestJSArrayForAllocationMemento(object, temp1, temp2, &no_memento_found); | 5836 __ TestJSArrayForAllocationMemento(object, temp1, temp2, &no_memento_found); |
5824 DeoptimizeIf(eq, instr, "memento found"); | 5837 DeoptimizeIf(eq, instr, Deoptimizer::kMementoFound); |
5825 __ Bind(&no_memento_found); | 5838 __ Bind(&no_memento_found); |
5826 } | 5839 } |
5827 | 5840 |
5828 | 5841 |
5829 void LCodeGen::DoTruncateDoubleToIntOrSmi(LTruncateDoubleToIntOrSmi* instr) { | 5842 void LCodeGen::DoTruncateDoubleToIntOrSmi(LTruncateDoubleToIntOrSmi* instr) { |
5830 DoubleRegister input = ToDoubleRegister(instr->value()); | 5843 DoubleRegister input = ToDoubleRegister(instr->value()); |
5831 Register result = ToRegister(instr->result()); | 5844 Register result = ToRegister(instr->result()); |
5832 __ TruncateDoubleToI(result, input); | 5845 __ TruncateDoubleToI(result, input); |
5833 if (instr->tag_result()) { | 5846 if (instr->tag_result()) { |
5834 __ SmiTag(result, result); | 5847 __ SmiTag(result, result); |
(...skipping 104 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
5939 __ Ucvtf(ToDoubleRegister(instr->result()), ToRegister32(instr->value())); | 5952 __ Ucvtf(ToDoubleRegister(instr->result()), ToRegister32(instr->value())); |
5940 } | 5953 } |
5941 | 5954 |
5942 | 5955 |
5943 void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) { | 5956 void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) { |
5944 Register object = ToRegister(instr->value()); | 5957 Register object = ToRegister(instr->value()); |
5945 Register map = ToRegister(instr->map()); | 5958 Register map = ToRegister(instr->map()); |
5946 Register temp = ToRegister(instr->temp()); | 5959 Register temp = ToRegister(instr->temp()); |
5947 __ Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset)); | 5960 __ Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset)); |
5948 __ Cmp(map, temp); | 5961 __ Cmp(map, temp); |
5949 DeoptimizeIf(ne, instr, "wrong map"); | 5962 DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap); |
5950 } | 5963 } |
5951 | 5964 |
5952 | 5965 |
5953 void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) { | 5966 void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) { |
5954 Register receiver = ToRegister(instr->receiver()); | 5967 Register receiver = ToRegister(instr->receiver()); |
5955 Register function = ToRegister(instr->function()); | 5968 Register function = ToRegister(instr->function()); |
5956 Register result = ToRegister(instr->result()); | 5969 Register result = ToRegister(instr->result()); |
5957 | 5970 |
5958 // If the receiver is null or undefined, we have to pass the global object as | 5971 // If the receiver is null or undefined, we have to pass the global object as |
5959 // a receiver to normal functions. Values have to be passed unchanged to | 5972 // a receiver to normal functions. Values have to be passed unchanged to |
(...skipping 13 matching lines...) Expand all Loading... |
5973 | 5986 |
5974 // Do not transform the receiver to object for builtins. | 5987 // Do not transform the receiver to object for builtins. |
5975 __ Tbnz(result, SharedFunctionInfo::kNative, ©_receiver); | 5988 __ Tbnz(result, SharedFunctionInfo::kNative, ©_receiver); |
5976 } | 5989 } |
5977 | 5990 |
5978 // Normal function. Replace undefined or null with global receiver. | 5991 // Normal function. Replace undefined or null with global receiver. |
5979 __ JumpIfRoot(receiver, Heap::kNullValueRootIndex, &global_object); | 5992 __ JumpIfRoot(receiver, Heap::kNullValueRootIndex, &global_object); |
5980 __ JumpIfRoot(receiver, Heap::kUndefinedValueRootIndex, &global_object); | 5993 __ JumpIfRoot(receiver, Heap::kUndefinedValueRootIndex, &global_object); |
5981 | 5994 |
5982 // Deoptimize if the receiver is not a JS object. | 5995 // Deoptimize if the receiver is not a JS object. |
5983 DeoptimizeIfSmi(receiver, instr, "Smi"); | 5996 DeoptimizeIfSmi(receiver, instr, Deoptimizer::kSmi); |
5984 __ CompareObjectType(receiver, result, result, FIRST_SPEC_OBJECT_TYPE); | 5997 __ CompareObjectType(receiver, result, result, FIRST_SPEC_OBJECT_TYPE); |
5985 __ B(ge, ©_receiver); | 5998 __ B(ge, ©_receiver); |
5986 Deoptimize(instr, "not a JavaScript object"); | 5999 Deoptimize(instr, Deoptimizer::kNotAJavaScriptObject); |
5987 | 6000 |
5988 __ Bind(&global_object); | 6001 __ Bind(&global_object); |
5989 __ Ldr(result, FieldMemOperand(function, JSFunction::kContextOffset)); | 6002 __ Ldr(result, FieldMemOperand(function, JSFunction::kContextOffset)); |
5990 __ Ldr(result, ContextMemOperand(result, Context::GLOBAL_OBJECT_INDEX)); | 6003 __ Ldr(result, ContextMemOperand(result, Context::GLOBAL_OBJECT_INDEX)); |
5991 __ Ldr(result, FieldMemOperand(result, GlobalObject::kGlobalProxyOffset)); | 6004 __ Ldr(result, FieldMemOperand(result, GlobalObject::kGlobalProxyOffset)); |
5992 __ B(&done); | 6005 __ B(&done); |
5993 | 6006 |
5994 __ Bind(©_receiver); | 6007 __ Bind(©_receiver); |
5995 __ Mov(result, receiver); | 6008 __ Mov(result, receiver); |
5996 __ Bind(&done); | 6009 __ Bind(&done); |
(...skipping 86 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
6083 Handle<ScopeInfo> scope_info = instr->scope_info(); | 6096 Handle<ScopeInfo> scope_info = instr->scope_info(); |
6084 __ Push(scope_info); | 6097 __ Push(scope_info); |
6085 __ Push(ToRegister(instr->function())); | 6098 __ Push(ToRegister(instr->function())); |
6086 CallRuntime(Runtime::kPushBlockContext, 2, instr); | 6099 CallRuntime(Runtime::kPushBlockContext, 2, instr); |
6087 RecordSafepoint(Safepoint::kNoLazyDeopt); | 6100 RecordSafepoint(Safepoint::kNoLazyDeopt); |
6088 } | 6101 } |
6089 | 6102 |
6090 | 6103 |
6091 | 6104 |
6092 } } // namespace v8::internal | 6105 } } // namespace v8::internal |
OLD | NEW |