Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(79)

Side by Side Diff: src/arm64/lithium-codegen-arm64.cc

Issue 892843007: Revert of Externalize deoptimization reasons. (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Created 5 years, 10 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « src/arm64/lithium-codegen-arm64.h ('k') | src/assembler.h » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright 2013 the V8 project authors. All rights reserved. 1 // Copyright 2013 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "src/v8.h" 5 #include "src/v8.h"
6 6
7 #include "src/arm64/lithium-codegen-arm64.h" 7 #include "src/arm64/lithium-codegen-arm64.h"
8 #include "src/arm64/lithium-gap-resolver-arm64.h" 8 #include "src/arm64/lithium-gap-resolver-arm64.h"
9 #include "src/base/bits.h" 9 #include "src/base/bits.h"
10 #include "src/code-factory.h" 10 #include "src/code-factory.h"
(...skipping 987 matching lines...) Expand 10 before | Expand all | Expand 10 after
998 998
999 for (int i = 0, length = inlined_closures->length(); i < length; i++) { 999 for (int i = 0, length = inlined_closures->length(); i < length; i++) {
1000 DefineDeoptimizationLiteral(inlined_closures->at(i)); 1000 DefineDeoptimizationLiteral(inlined_closures->at(i));
1001 } 1001 }
1002 1002
1003 inlined_function_count_ = deoptimization_literals_.length(); 1003 inlined_function_count_ = deoptimization_literals_.length();
1004 } 1004 }
1005 1005
1006 1006
1007 void LCodeGen::DeoptimizeBranch( 1007 void LCodeGen::DeoptimizeBranch(
1008 LInstruction* instr, Deoptimizer::DeoptReason deopt_reason, 1008 LInstruction* instr, const char* detail, BranchType branch_type,
1009 BranchType branch_type, Register reg, int bit, 1009 Register reg, int bit, Deoptimizer::BailoutType* override_bailout_type) {
1010 Deoptimizer::BailoutType* override_bailout_type) {
1011 LEnvironment* environment = instr->environment(); 1010 LEnvironment* environment = instr->environment();
1012 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); 1011 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
1013 Deoptimizer::BailoutType bailout_type = 1012 Deoptimizer::BailoutType bailout_type =
1014 info()->IsStub() ? Deoptimizer::LAZY : Deoptimizer::EAGER; 1013 info()->IsStub() ? Deoptimizer::LAZY : Deoptimizer::EAGER;
1015 1014
1016 if (override_bailout_type != NULL) { 1015 if (override_bailout_type != NULL) {
1017 bailout_type = *override_bailout_type; 1016 bailout_type = *override_bailout_type;
1018 } 1017 }
1019 1018
1020 DCHECK(environment->HasBeenRegistered()); 1019 DCHECK(environment->HasBeenRegistered());
(...skipping 30 matching lines...) Expand all
1051 } 1050 }
1052 1051
1053 if (info()->ShouldTrapOnDeopt()) { 1052 if (info()->ShouldTrapOnDeopt()) {
1054 Label dont_trap; 1053 Label dont_trap;
1055 __ B(&dont_trap, InvertBranchType(branch_type), reg, bit); 1054 __ B(&dont_trap, InvertBranchType(branch_type), reg, bit);
1056 __ Debug("trap_on_deopt", __LINE__, BREAK); 1055 __ Debug("trap_on_deopt", __LINE__, BREAK);
1057 __ Bind(&dont_trap); 1056 __ Bind(&dont_trap);
1058 } 1057 }
1059 1058
1060 Deoptimizer::Reason reason(instr->hydrogen_value()->position().raw(), 1059 Deoptimizer::Reason reason(instr->hydrogen_value()->position().raw(),
1061 instr->Mnemonic(), deopt_reason); 1060 instr->Mnemonic(), detail);
1062 DCHECK(info()->IsStub() || frame_is_built_); 1061 DCHECK(info()->IsStub() || frame_is_built_);
1063 // Go through jump table if we need to build frame, or restore caller doubles. 1062 // Go through jump table if we need to build frame, or restore caller doubles.
1064 if (branch_type == always && 1063 if (branch_type == always &&
1065 frame_is_built_ && !info()->saves_caller_doubles()) { 1064 frame_is_built_ && !info()->saves_caller_doubles()) {
1066 DeoptComment(reason); 1065 DeoptComment(reason);
1067 __ Call(entry, RelocInfo::RUNTIME_ENTRY); 1066 __ Call(entry, RelocInfo::RUNTIME_ENTRY);
1068 } else { 1067 } else {
1069 Deoptimizer::JumpTableEntry* table_entry = 1068 Deoptimizer::JumpTableEntry* table_entry =
1070 new (zone()) Deoptimizer::JumpTableEntry(entry, reason, bailout_type, 1069 new (zone()) Deoptimizer::JumpTableEntry(entry, reason, bailout_type,
1071 !frame_is_built_); 1070 !frame_is_built_);
1072 // We often have several deopts to the same entry, reuse the last 1071 // We often have several deopts to the same entry, reuse the last
1073 // jump entry if this is the case. 1072 // jump entry if this is the case.
1074 if (jump_table_.is_empty() || 1073 if (jump_table_.is_empty() ||
1075 !table_entry->IsEquivalentTo(*jump_table_.last())) { 1074 !table_entry->IsEquivalentTo(*jump_table_.last())) {
1076 jump_table_.Add(table_entry, zone()); 1075 jump_table_.Add(table_entry, zone());
1077 } 1076 }
1078 __ B(&jump_table_.last()->label, branch_type, reg, bit); 1077 __ B(&jump_table_.last()->label, branch_type, reg, bit);
1079 } 1078 }
1080 } 1079 }
1081 1080
1082 1081
1083 void LCodeGen::Deoptimize(LInstruction* instr, 1082 void LCodeGen::Deoptimize(LInstruction* instr, const char* detail,
1084 Deoptimizer::DeoptReason deopt_reason,
1085 Deoptimizer::BailoutType* override_bailout_type) { 1083 Deoptimizer::BailoutType* override_bailout_type) {
1086 DeoptimizeBranch(instr, deopt_reason, always, NoReg, -1, 1084 DeoptimizeBranch(instr, detail, always, NoReg, -1, override_bailout_type);
1087 override_bailout_type);
1088 } 1085 }
1089 1086
1090 1087
1091 void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr, 1088 void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr,
1092 Deoptimizer::DeoptReason deopt_reason) { 1089 const char* detail) {
1093 DeoptimizeBranch(instr, deopt_reason, static_cast<BranchType>(cond)); 1090 DeoptimizeBranch(instr, detail, static_cast<BranchType>(cond));
1094 } 1091 }
1095 1092
1096 1093
1097 void LCodeGen::DeoptimizeIfZero(Register rt, LInstruction* instr, 1094 void LCodeGen::DeoptimizeIfZero(Register rt, LInstruction* instr,
1098 Deoptimizer::DeoptReason deopt_reason) { 1095 const char* detail) {
1099 DeoptimizeBranch(instr, deopt_reason, reg_zero, rt); 1096 DeoptimizeBranch(instr, detail, reg_zero, rt);
1100 } 1097 }
1101 1098
1102 1099
1103 void LCodeGen::DeoptimizeIfNotZero(Register rt, LInstruction* instr, 1100 void LCodeGen::DeoptimizeIfNotZero(Register rt, LInstruction* instr,
1104 Deoptimizer::DeoptReason deopt_reason) { 1101 const char* detail) {
1105 DeoptimizeBranch(instr, deopt_reason, reg_not_zero, rt); 1102 DeoptimizeBranch(instr, detail, reg_not_zero, rt);
1106 } 1103 }
1107 1104
1108 1105
1109 void LCodeGen::DeoptimizeIfNegative(Register rt, LInstruction* instr, 1106 void LCodeGen::DeoptimizeIfNegative(Register rt, LInstruction* instr,
1110 Deoptimizer::DeoptReason deopt_reason) { 1107 const char* detail) {
1111 int sign_bit = rt.Is64Bits() ? kXSignBit : kWSignBit; 1108 int sign_bit = rt.Is64Bits() ? kXSignBit : kWSignBit;
1112 DeoptimizeIfBitSet(rt, sign_bit, instr, deopt_reason); 1109 DeoptimizeIfBitSet(rt, sign_bit, instr, detail);
1113 } 1110 }
1114 1111
1115 1112
1116 void LCodeGen::DeoptimizeIfSmi(Register rt, LInstruction* instr, 1113 void LCodeGen::DeoptimizeIfSmi(Register rt, LInstruction* instr,
1117 Deoptimizer::DeoptReason deopt_reason) { 1114 const char* detail) {
1118 DeoptimizeIfBitClear(rt, MaskToBit(kSmiTagMask), instr, deopt_reason); 1115 DeoptimizeIfBitClear(rt, MaskToBit(kSmiTagMask), instr, detail);
1119 } 1116 }
1120 1117
1121 1118
1122 void LCodeGen::DeoptimizeIfNotSmi(Register rt, LInstruction* instr, 1119 void LCodeGen::DeoptimizeIfNotSmi(Register rt, LInstruction* instr,
1123 Deoptimizer::DeoptReason deopt_reason) { 1120 const char* detail) {
1124 DeoptimizeIfBitSet(rt, MaskToBit(kSmiTagMask), instr, deopt_reason); 1121 DeoptimizeIfBitSet(rt, MaskToBit(kSmiTagMask), instr, detail);
1125 } 1122 }
1126 1123
1127 1124
1128 void LCodeGen::DeoptimizeIfRoot(Register rt, Heap::RootListIndex index, 1125 void LCodeGen::DeoptimizeIfRoot(Register rt, Heap::RootListIndex index,
1129 LInstruction* instr, 1126 LInstruction* instr, const char* detail) {
1130 Deoptimizer::DeoptReason deopt_reason) {
1131 __ CompareRoot(rt, index); 1127 __ CompareRoot(rt, index);
1132 DeoptimizeIf(eq, instr, deopt_reason); 1128 DeoptimizeIf(eq, instr, detail);
1133 } 1129 }
1134 1130
1135 1131
1136 void LCodeGen::DeoptimizeIfNotRoot(Register rt, Heap::RootListIndex index, 1132 void LCodeGen::DeoptimizeIfNotRoot(Register rt, Heap::RootListIndex index,
1137 LInstruction* instr, 1133 LInstruction* instr, const char* detail) {
1138 Deoptimizer::DeoptReason deopt_reason) {
1139 __ CompareRoot(rt, index); 1134 __ CompareRoot(rt, index);
1140 DeoptimizeIf(ne, instr, deopt_reason); 1135 DeoptimizeIf(ne, instr, detail);
1141 } 1136 }
1142 1137
1143 1138
1144 void LCodeGen::DeoptimizeIfMinusZero(DoubleRegister input, LInstruction* instr, 1139 void LCodeGen::DeoptimizeIfMinusZero(DoubleRegister input, LInstruction* instr,
1145 Deoptimizer::DeoptReason deopt_reason) { 1140 const char* detail) {
1146 __ TestForMinusZero(input); 1141 __ TestForMinusZero(input);
1147 DeoptimizeIf(vs, instr, deopt_reason); 1142 DeoptimizeIf(vs, instr, detail);
1148 } 1143 }
1149 1144
1150 1145
1151 void LCodeGen::DeoptimizeIfNotHeapNumber(Register object, LInstruction* instr) { 1146 void LCodeGen::DeoptimizeIfNotHeapNumber(Register object, LInstruction* instr) {
1152 __ CompareObjectMap(object, Heap::kHeapNumberMapRootIndex); 1147 __ CompareObjectMap(object, Heap::kHeapNumberMapRootIndex);
1153 DeoptimizeIf(ne, instr, Deoptimizer::kNotHeapNumber); 1148 DeoptimizeIf(ne, instr, "not heap number");
1154 } 1149 }
1155 1150
1156 1151
1157 void LCodeGen::DeoptimizeIfBitSet(Register rt, int bit, LInstruction* instr, 1152 void LCodeGen::DeoptimizeIfBitSet(Register rt, int bit, LInstruction* instr,
1158 Deoptimizer::DeoptReason deopt_reason) { 1153 const char* detail) {
1159 DeoptimizeBranch(instr, deopt_reason, reg_bit_set, rt, bit); 1154 DeoptimizeBranch(instr, detail, reg_bit_set, rt, bit);
1160 } 1155 }
1161 1156
1162 1157
1163 void LCodeGen::DeoptimizeIfBitClear(Register rt, int bit, LInstruction* instr, 1158 void LCodeGen::DeoptimizeIfBitClear(Register rt, int bit, LInstruction* instr,
1164 Deoptimizer::DeoptReason deopt_reason) { 1159 const char* detail) {
1165 DeoptimizeBranch(instr, deopt_reason, reg_bit_clear, rt, bit); 1160 DeoptimizeBranch(instr, detail, reg_bit_clear, rt, bit);
1166 } 1161 }
1167 1162
1168 1163
1169 void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) { 1164 void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
1170 if (!info()->IsStub()) { 1165 if (!info()->IsStub()) {
1171 // Ensure that we have enough space after the previous lazy-bailout 1166 // Ensure that we have enough space after the previous lazy-bailout
1172 // instruction for patching the code here. 1167 // instruction for patching the code here.
1173 intptr_t current_pc = masm()->pc_offset(); 1168 intptr_t current_pc = masm()->pc_offset();
1174 1169
1175 if (current_pc < (last_lazy_deopt_pc_ + space_needed)) { 1170 if (current_pc < (last_lazy_deopt_pc_ + space_needed)) {
(...skipping 353 matching lines...) Expand 10 before | Expand all | Expand 10 after
1529 1524
1530 1525
1531 void LCodeGen::DoAddI(LAddI* instr) { 1526 void LCodeGen::DoAddI(LAddI* instr) {
1532 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); 1527 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1533 Register result = ToRegister32(instr->result()); 1528 Register result = ToRegister32(instr->result());
1534 Register left = ToRegister32(instr->left()); 1529 Register left = ToRegister32(instr->left());
1535 Operand right = ToShiftedRightOperand32(instr->right(), instr); 1530 Operand right = ToShiftedRightOperand32(instr->right(), instr);
1536 1531
1537 if (can_overflow) { 1532 if (can_overflow) {
1538 __ Adds(result, left, right); 1533 __ Adds(result, left, right);
1539 DeoptimizeIf(vs, instr, Deoptimizer::kOverflow); 1534 DeoptimizeIf(vs, instr, "overflow");
1540 } else { 1535 } else {
1541 __ Add(result, left, right); 1536 __ Add(result, left, right);
1542 } 1537 }
1543 } 1538 }
1544 1539
1545 1540
1546 void LCodeGen::DoAddS(LAddS* instr) { 1541 void LCodeGen::DoAddS(LAddS* instr) {
1547 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); 1542 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1548 Register result = ToRegister(instr->result()); 1543 Register result = ToRegister(instr->result());
1549 Register left = ToRegister(instr->left()); 1544 Register left = ToRegister(instr->left());
1550 Operand right = ToOperand(instr->right()); 1545 Operand right = ToOperand(instr->right());
1551 if (can_overflow) { 1546 if (can_overflow) {
1552 __ Adds(result, left, right); 1547 __ Adds(result, left, right);
1553 DeoptimizeIf(vs, instr, Deoptimizer::kOverflow); 1548 DeoptimizeIf(vs, instr, "overflow");
1554 } else { 1549 } else {
1555 __ Add(result, left, right); 1550 __ Add(result, left, right);
1556 } 1551 }
1557 } 1552 }
1558 1553
1559 1554
1560 void LCodeGen::DoAllocate(LAllocate* instr) { 1555 void LCodeGen::DoAllocate(LAllocate* instr) {
1561 class DeferredAllocate: public LDeferredCode { 1556 class DeferredAllocate: public LDeferredCode {
1562 public: 1557 public:
1563 DeferredAllocate(LCodeGen* codegen, LAllocate* instr) 1558 DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
(...skipping 105 matching lines...) Expand 10 before | Expand all | Expand 10 after
1669 Register scratch = x5; 1664 Register scratch = x5;
1670 DCHECK(receiver.Is(x0)); // Used for parameter count. 1665 DCHECK(receiver.Is(x0)); // Used for parameter count.
1671 DCHECK(function.Is(x1)); // Required by InvokeFunction. 1666 DCHECK(function.Is(x1)); // Required by InvokeFunction.
1672 DCHECK(ToRegister(instr->result()).Is(x0)); 1667 DCHECK(ToRegister(instr->result()).Is(x0));
1673 DCHECK(instr->IsMarkedAsCall()); 1668 DCHECK(instr->IsMarkedAsCall());
1674 1669
1675 // Copy the arguments to this function possibly from the 1670 // Copy the arguments to this function possibly from the
1676 // adaptor frame below it. 1671 // adaptor frame below it.
1677 const uint32_t kArgumentsLimit = 1 * KB; 1672 const uint32_t kArgumentsLimit = 1 * KB;
1678 __ Cmp(length, kArgumentsLimit); 1673 __ Cmp(length, kArgumentsLimit);
1679 DeoptimizeIf(hi, instr, Deoptimizer::kTooManyArguments); 1674 DeoptimizeIf(hi, instr, "too many arguments");
1680 1675
1681 // Push the receiver and use the register to keep the original 1676 // Push the receiver and use the register to keep the original
1682 // number of arguments. 1677 // number of arguments.
1683 __ Push(receiver); 1678 __ Push(receiver);
1684 Register argc = receiver; 1679 Register argc = receiver;
1685 receiver = NoReg; 1680 receiver = NoReg;
1686 __ Sxtw(argc, length); 1681 __ Sxtw(argc, length);
1687 // The arguments are at a one pointer size offset from elements. 1682 // The arguments are at a one pointer size offset from elements.
1688 __ Add(elements, elements, 1 * kPointerSize); 1683 __ Add(elements, elements, 1 * kPointerSize);
1689 1684
(...skipping 161 matching lines...) Expand 10 before | Expand all | Expand 10 after
1851 __ Cmp(length, index); 1846 __ Cmp(length, index);
1852 cond = CommuteCondition(cond); 1847 cond = CommuteCondition(cond);
1853 } else { 1848 } else {
1854 Register index = ToRegister32(instr->index()); 1849 Register index = ToRegister32(instr->index());
1855 Operand length = ToOperand32(instr->length()); 1850 Operand length = ToOperand32(instr->length());
1856 __ Cmp(index, length); 1851 __ Cmp(index, length);
1857 } 1852 }
1858 if (FLAG_debug_code && instr->hydrogen()->skip_check()) { 1853 if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
1859 __ Assert(NegateCondition(cond), kEliminatedBoundsCheckFailed); 1854 __ Assert(NegateCondition(cond), kEliminatedBoundsCheckFailed);
1860 } else { 1855 } else {
1861 DeoptimizeIf(cond, instr, Deoptimizer::kOutOfBounds); 1856 DeoptimizeIf(cond, instr, "out of bounds");
1862 } 1857 }
1863 } 1858 }
1864 1859
1865 1860
1866 void LCodeGen::DoBranch(LBranch* instr) { 1861 void LCodeGen::DoBranch(LBranch* instr) {
1867 Representation r = instr->hydrogen()->value()->representation(); 1862 Representation r = instr->hydrogen()->value()->representation();
1868 Label* true_label = instr->TrueLabel(chunk_); 1863 Label* true_label = instr->TrueLabel(chunk_);
1869 Label* false_label = instr->FalseLabel(chunk_); 1864 Label* false_label = instr->FalseLabel(chunk_);
1870 1865
1871 if (r.IsInteger32()) { 1866 if (r.IsInteger32()) {
(...skipping 58 matching lines...) Expand 10 before | Expand all | Expand 10 after
1930 value, Heap::kNullValueRootIndex, false_label); 1925 value, Heap::kNullValueRootIndex, false_label);
1931 } 1926 }
1932 1927
1933 if (expected.Contains(ToBooleanStub::SMI)) { 1928 if (expected.Contains(ToBooleanStub::SMI)) {
1934 // Smis: 0 -> false, all other -> true. 1929 // Smis: 0 -> false, all other -> true.
1935 DCHECK(Smi::FromInt(0) == 0); 1930 DCHECK(Smi::FromInt(0) == 0);
1936 __ Cbz(value, false_label); 1931 __ Cbz(value, false_label);
1937 __ JumpIfSmi(value, true_label); 1932 __ JumpIfSmi(value, true_label);
1938 } else if (expected.NeedsMap()) { 1933 } else if (expected.NeedsMap()) {
1939 // If we need a map later and have a smi, deopt. 1934 // If we need a map later and have a smi, deopt.
1940 DeoptimizeIfSmi(value, instr, Deoptimizer::kSmi); 1935 DeoptimizeIfSmi(value, instr, "Smi");
1941 } 1936 }
1942 1937
1943 Register map = NoReg; 1938 Register map = NoReg;
1944 Register scratch = NoReg; 1939 Register scratch = NoReg;
1945 1940
1946 if (expected.NeedsMap()) { 1941 if (expected.NeedsMap()) {
1947 DCHECK((instr->temp1() != NULL) && (instr->temp2() != NULL)); 1942 DCHECK((instr->temp1() != NULL) && (instr->temp2() != NULL));
1948 map = ToRegister(instr->temp1()); 1943 map = ToRegister(instr->temp1());
1949 scratch = ToRegister(instr->temp2()); 1944 scratch = ToRegister(instr->temp2());
1950 1945
(...skipping 40 matching lines...) Expand 10 before | Expand all | Expand 10 after
1991 // If we got a NaN (overflow bit is set), jump to the false branch. 1986 // If we got a NaN (overflow bit is set), jump to the false branch.
1992 __ B(vs, false_label); 1987 __ B(vs, false_label);
1993 __ B(eq, false_label); 1988 __ B(eq, false_label);
1994 __ B(true_label); 1989 __ B(true_label);
1995 __ Bind(&not_heap_number); 1990 __ Bind(&not_heap_number);
1996 } 1991 }
1997 1992
1998 if (!expected.IsGeneric()) { 1993 if (!expected.IsGeneric()) {
1999 // We've seen something for the first time -> deopt. 1994 // We've seen something for the first time -> deopt.
2000 // This can only happen if we are not generic already. 1995 // This can only happen if we are not generic already.
2001 Deoptimize(instr, Deoptimizer::kUnexpectedObject); 1996 Deoptimize(instr, "unexpected object");
2002 } 1997 }
2003 } 1998 }
2004 } 1999 }
2005 } 2000 }
2006 2001
2007 2002
2008 void LCodeGen::CallKnownFunction(Handle<JSFunction> function, 2003 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
2009 int formal_parameter_count, int arity, 2004 int formal_parameter_count, int arity,
2010 LInstruction* instr) { 2005 LInstruction* instr) {
2011 bool dont_adapt_arguments = 2006 bool dont_adapt_arguments =
(...skipping 185 matching lines...) Expand 10 before | Expand all | Expand 10 after
2197 Register temp = ToRegister(instr->temp()); 2192 Register temp = ToRegister(instr->temp());
2198 { 2193 {
2199 PushSafepointRegistersScope scope(this); 2194 PushSafepointRegistersScope scope(this);
2200 __ Push(object); 2195 __ Push(object);
2201 __ Mov(cp, 0); 2196 __ Mov(cp, 0);
2202 __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance); 2197 __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
2203 RecordSafepointWithRegisters( 2198 RecordSafepointWithRegisters(
2204 instr->pointer_map(), 1, Safepoint::kNoLazyDeopt); 2199 instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
2205 __ StoreToSafepointRegisterSlot(x0, temp); 2200 __ StoreToSafepointRegisterSlot(x0, temp);
2206 } 2201 }
2207 DeoptimizeIfSmi(temp, instr, Deoptimizer::kInstanceMigrationFailed); 2202 DeoptimizeIfSmi(temp, instr, "instance migration failed");
2208 } 2203 }
2209 2204
2210 2205
2211 void LCodeGen::DoCheckMaps(LCheckMaps* instr) { 2206 void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
2212 class DeferredCheckMaps: public LDeferredCode { 2207 class DeferredCheckMaps: public LDeferredCode {
2213 public: 2208 public:
2214 DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object) 2209 DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
2215 : LDeferredCode(codegen), instr_(instr), object_(object) { 2210 : LDeferredCode(codegen), instr_(instr), object_(object) {
2216 SetExit(check_maps()); 2211 SetExit(check_maps());
2217 } 2212 }
(...skipping 34 matching lines...) Expand 10 before | Expand all | Expand 10 after
2252 __ CompareMap(map_reg, map); 2247 __ CompareMap(map_reg, map);
2253 __ B(eq, &success); 2248 __ B(eq, &success);
2254 } 2249 }
2255 Handle<Map> map = maps->at(maps->size() - 1).handle(); 2250 Handle<Map> map = maps->at(maps->size() - 1).handle();
2256 __ CompareMap(map_reg, map); 2251 __ CompareMap(map_reg, map);
2257 2252
2258 // We didn't match a map. 2253 // We didn't match a map.
2259 if (instr->hydrogen()->HasMigrationTarget()) { 2254 if (instr->hydrogen()->HasMigrationTarget()) {
2260 __ B(ne, deferred->entry()); 2255 __ B(ne, deferred->entry());
2261 } else { 2256 } else {
2262 DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap); 2257 DeoptimizeIf(ne, instr, "wrong map");
2263 } 2258 }
2264 2259
2265 __ Bind(&success); 2260 __ Bind(&success);
2266 } 2261 }
2267 2262
2268 2263
2269 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) { 2264 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
2270 if (!instr->hydrogen()->value()->type().IsHeapObject()) { 2265 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
2271 DeoptimizeIfSmi(ToRegister(instr->value()), instr, Deoptimizer::kSmi); 2266 DeoptimizeIfSmi(ToRegister(instr->value()), instr, "Smi");
2272 } 2267 }
2273 } 2268 }
2274 2269
2275 2270
2276 void LCodeGen::DoCheckSmi(LCheckSmi* instr) { 2271 void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
2277 Register value = ToRegister(instr->value()); 2272 Register value = ToRegister(instr->value());
2278 DCHECK(!instr->result() || ToRegister(instr->result()).Is(value)); 2273 DCHECK(!instr->result() || ToRegister(instr->result()).Is(value));
2279 DeoptimizeIfNotSmi(value, instr, Deoptimizer::kNotASmi); 2274 DeoptimizeIfNotSmi(value, instr, "not a Smi");
2280 } 2275 }
2281 2276
2282 2277
2283 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) { 2278 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
2284 Register input = ToRegister(instr->value()); 2279 Register input = ToRegister(instr->value());
2285 Register scratch = ToRegister(instr->temp()); 2280 Register scratch = ToRegister(instr->temp());
2286 2281
2287 __ Ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); 2282 __ Ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
2288 __ Ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); 2283 __ Ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
2289 2284
2290 if (instr->hydrogen()->is_interval_check()) { 2285 if (instr->hydrogen()->is_interval_check()) {
2291 InstanceType first, last; 2286 InstanceType first, last;
2292 instr->hydrogen()->GetCheckInterval(&first, &last); 2287 instr->hydrogen()->GetCheckInterval(&first, &last);
2293 2288
2294 __ Cmp(scratch, first); 2289 __ Cmp(scratch, first);
2295 if (first == last) { 2290 if (first == last) {
2296 // If there is only one type in the interval check for equality. 2291 // If there is only one type in the interval check for equality.
2297 DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType); 2292 DeoptimizeIf(ne, instr, "wrong instance type");
2298 } else if (last == LAST_TYPE) { 2293 } else if (last == LAST_TYPE) {
2299 // We don't need to compare with the higher bound of the interval. 2294 // We don't need to compare with the higher bound of the interval.
2300 DeoptimizeIf(lo, instr, Deoptimizer::kWrongInstanceType); 2295 DeoptimizeIf(lo, instr, "wrong instance type");
2301 } else { 2296 } else {
2302 // If we are below the lower bound, set the C flag and clear the Z flag 2297 // If we are below the lower bound, set the C flag and clear the Z flag
2303 // to force a deopt. 2298 // to force a deopt.
2304 __ Ccmp(scratch, last, CFlag, hs); 2299 __ Ccmp(scratch, last, CFlag, hs);
2305 DeoptimizeIf(hi, instr, Deoptimizer::kWrongInstanceType); 2300 DeoptimizeIf(hi, instr, "wrong instance type");
2306 } 2301 }
2307 } else { 2302 } else {
2308 uint8_t mask; 2303 uint8_t mask;
2309 uint8_t tag; 2304 uint8_t tag;
2310 instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag); 2305 instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
2311 2306
2312 if (base::bits::IsPowerOfTwo32(mask)) { 2307 if (base::bits::IsPowerOfTwo32(mask)) {
2313 DCHECK((tag == 0) || (tag == mask)); 2308 DCHECK((tag == 0) || (tag == mask));
2314 if (tag == 0) { 2309 if (tag == 0) {
2315 DeoptimizeIfBitSet(scratch, MaskToBit(mask), instr, 2310 DeoptimizeIfBitSet(scratch, MaskToBit(mask), instr,
2316 Deoptimizer::kWrongInstanceType); 2311 "wrong instance type");
2317 } else { 2312 } else {
2318 DeoptimizeIfBitClear(scratch, MaskToBit(mask), instr, 2313 DeoptimizeIfBitClear(scratch, MaskToBit(mask), instr,
2319 Deoptimizer::kWrongInstanceType); 2314 "wrong instance type");
2320 } 2315 }
2321 } else { 2316 } else {
2322 if (tag == 0) { 2317 if (tag == 0) {
2323 __ Tst(scratch, mask); 2318 __ Tst(scratch, mask);
2324 } else { 2319 } else {
2325 __ And(scratch, scratch, mask); 2320 __ And(scratch, scratch, mask);
2326 __ Cmp(scratch, tag); 2321 __ Cmp(scratch, tag);
2327 } 2322 }
2328 DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType); 2323 DeoptimizeIf(ne, instr, "wrong instance type");
2329 } 2324 }
2330 } 2325 }
2331 } 2326 }
2332 2327
2333 2328
2334 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { 2329 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
2335 DoubleRegister input = ToDoubleRegister(instr->unclamped()); 2330 DoubleRegister input = ToDoubleRegister(instr->unclamped());
2336 Register result = ToRegister32(instr->result()); 2331 Register result = ToRegister32(instr->result());
2337 __ ClampDoubleToUint8(result, input, double_scratch()); 2332 __ ClampDoubleToUint8(result, input, double_scratch());
2338 } 2333 }
(...skipping 19 matching lines...) Expand all
2358 __ B(&done); 2353 __ B(&done);
2359 2354
2360 __ Bind(&is_not_smi); 2355 __ Bind(&is_not_smi);
2361 2356
2362 // Check for heap number. 2357 // Check for heap number.
2363 Label is_heap_number; 2358 Label is_heap_number;
2364 __ JumpIfHeapNumber(input, &is_heap_number); 2359 __ JumpIfHeapNumber(input, &is_heap_number);
2365 2360
 2366 // Check for undefined. Undefined is converted to zero for clamping conversion. 2361 // Check for undefined. Undefined is converted to zero for clamping conversion.
2367 DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr, 2362 DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr,
2368 Deoptimizer::kNotAHeapNumberUndefined); 2363 "not a heap number/undefined");
2369 __ Mov(result, 0); 2364 __ Mov(result, 0);
2370 __ B(&done); 2365 __ B(&done);
2371 2366
2372 // Heap number case. 2367 // Heap number case.
2373 __ Bind(&is_heap_number); 2368 __ Bind(&is_heap_number);
2374 DoubleRegister dbl_scratch = double_scratch(); 2369 DoubleRegister dbl_scratch = double_scratch();
2375 DoubleRegister dbl_scratch2 = ToDoubleRegister(instr->temp1()); 2370 DoubleRegister dbl_scratch2 = ToDoubleRegister(instr->temp1());
2376 __ Ldr(dbl_scratch, FieldMemOperand(input, HeapNumber::kValueOffset)); 2371 __ Ldr(dbl_scratch, FieldMemOperand(input, HeapNumber::kValueOffset));
2377 __ ClampDoubleToUint8(result, dbl_scratch, dbl_scratch2); 2372 __ ClampDoubleToUint8(result, dbl_scratch, dbl_scratch2);
2378 2373
(...skipping 284 matching lines...) Expand 10 before | Expand all | Expand 10 after
2663 if (isolate()->heap()->InNewSpace(*object)) { 2658 if (isolate()->heap()->InNewSpace(*object)) {
2664 UseScratchRegisterScope temps(masm()); 2659 UseScratchRegisterScope temps(masm());
2665 Register temp = temps.AcquireX(); 2660 Register temp = temps.AcquireX();
2666 Handle<Cell> cell = isolate()->factory()->NewCell(object); 2661 Handle<Cell> cell = isolate()->factory()->NewCell(object);
2667 __ Mov(temp, Operand(Handle<Object>(cell))); 2662 __ Mov(temp, Operand(Handle<Object>(cell)));
2668 __ Ldr(temp, FieldMemOperand(temp, Cell::kValueOffset)); 2663 __ Ldr(temp, FieldMemOperand(temp, Cell::kValueOffset));
2669 __ Cmp(reg, temp); 2664 __ Cmp(reg, temp);
2670 } else { 2665 } else {
2671 __ Cmp(reg, Operand(object)); 2666 __ Cmp(reg, Operand(object));
2672 } 2667 }
2673 DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch); 2668 DeoptimizeIf(ne, instr, "value mismatch");
2674 } 2669 }
2675 2670
2676 2671
2677 void LCodeGen::DoLazyBailout(LLazyBailout* instr) { 2672 void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
2678 last_lazy_deopt_pc_ = masm()->pc_offset(); 2673 last_lazy_deopt_pc_ = masm()->pc_offset();
2679 DCHECK(instr->HasEnvironment()); 2674 DCHECK(instr->HasEnvironment());
2680 LEnvironment* env = instr->environment(); 2675 LEnvironment* env = instr->environment();
2681 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt); 2676 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
2682 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index()); 2677 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
2683 } 2678 }
2684 2679
2685 2680
2686 void LCodeGen::DoDateField(LDateField* instr) { 2681 void LCodeGen::DoDateField(LDateField* instr) {
2687 Register object = ToRegister(instr->date()); 2682 Register object = ToRegister(instr->date());
2688 Register result = ToRegister(instr->result()); 2683 Register result = ToRegister(instr->result());
2689 Register temp1 = x10; 2684 Register temp1 = x10;
2690 Register temp2 = x11; 2685 Register temp2 = x11;
2691 Smi* index = instr->index(); 2686 Smi* index = instr->index();
2692 Label runtime, done; 2687 Label runtime, done;
2693 2688
2694 DCHECK(object.is(result) && object.Is(x0)); 2689 DCHECK(object.is(result) && object.Is(x0));
2695 DCHECK(instr->IsMarkedAsCall()); 2690 DCHECK(instr->IsMarkedAsCall());
2696 2691
2697 DeoptimizeIfSmi(object, instr, Deoptimizer::kSmi); 2692 DeoptimizeIfSmi(object, instr, "Smi");
2698 __ CompareObjectType(object, temp1, temp1, JS_DATE_TYPE); 2693 __ CompareObjectType(object, temp1, temp1, JS_DATE_TYPE);
2699 DeoptimizeIf(ne, instr, Deoptimizer::kNotADateObject); 2694 DeoptimizeIf(ne, instr, "not a date object");
2700 2695
2701 if (index->value() == 0) { 2696 if (index->value() == 0) {
2702 __ Ldr(result, FieldMemOperand(object, JSDate::kValueOffset)); 2697 __ Ldr(result, FieldMemOperand(object, JSDate::kValueOffset));
2703 } else { 2698 } else {
2704 if (index->value() < JSDate::kFirstUncachedField) { 2699 if (index->value() < JSDate::kFirstUncachedField) {
2705 ExternalReference stamp = ExternalReference::date_cache_stamp(isolate()); 2700 ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
2706 __ Mov(temp1, Operand(stamp)); 2701 __ Mov(temp1, Operand(stamp));
2707 __ Ldr(temp1, MemOperand(temp1)); 2702 __ Ldr(temp1, MemOperand(temp1));
2708 __ Ldr(temp2, FieldMemOperand(object, JSDate::kCacheStampOffset)); 2703 __ Ldr(temp2, FieldMemOperand(object, JSDate::kCacheStampOffset));
2709 __ Cmp(temp1, temp2); 2704 __ Cmp(temp1, temp2);
(...skipping 29 matching lines...) Expand all
2739 void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) { 2734 void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
2740 Register dividend = ToRegister32(instr->dividend()); 2735 Register dividend = ToRegister32(instr->dividend());
2741 int32_t divisor = instr->divisor(); 2736 int32_t divisor = instr->divisor();
2742 Register result = ToRegister32(instr->result()); 2737 Register result = ToRegister32(instr->result());
2743 DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor))); 2738 DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
2744 DCHECK(!result.is(dividend)); 2739 DCHECK(!result.is(dividend));
2745 2740
2746 // Check for (0 / -x) that will produce negative zero. 2741 // Check for (0 / -x) that will produce negative zero.
2747 HDiv* hdiv = instr->hydrogen(); 2742 HDiv* hdiv = instr->hydrogen();
2748 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { 2743 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
2749 DeoptimizeIfZero(dividend, instr, Deoptimizer::kDivisionByZero); 2744 DeoptimizeIfZero(dividend, instr, "division by zero");
2750 } 2745 }
2751 // Check for (kMinInt / -1). 2746 // Check for (kMinInt / -1).
2752 if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) { 2747 if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
2753 // Test dividend for kMinInt by subtracting one (cmp) and checking for 2748 // Test dividend for kMinInt by subtracting one (cmp) and checking for
2754 // overflow. 2749 // overflow.
2755 __ Cmp(dividend, 1); 2750 __ Cmp(dividend, 1);
2756 DeoptimizeIf(vs, instr, Deoptimizer::kOverflow); 2751 DeoptimizeIf(vs, instr, "overflow");
2757 } 2752 }
2758 // Deoptimize if remainder will not be 0. 2753 // Deoptimize if remainder will not be 0.
2759 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && 2754 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
2760 divisor != 1 && divisor != -1) { 2755 divisor != 1 && divisor != -1) {
2761 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1); 2756 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
2762 __ Tst(dividend, mask); 2757 __ Tst(dividend, mask);
2763 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision); 2758 DeoptimizeIf(ne, instr, "lost precision");
2764 } 2759 }
2765 2760
2766 if (divisor == -1) { // Nice shortcut, not needed for correctness. 2761 if (divisor == -1) { // Nice shortcut, not needed for correctness.
2767 __ Neg(result, dividend); 2762 __ Neg(result, dividend);
2768 return; 2763 return;
2769 } 2764 }
2770 int32_t shift = WhichPowerOf2Abs(divisor); 2765 int32_t shift = WhichPowerOf2Abs(divisor);
2771 if (shift == 0) { 2766 if (shift == 0) {
2772 __ Mov(result, dividend); 2767 __ Mov(result, dividend);
2773 } else if (shift == 1) { 2768 } else if (shift == 1) {
2774 __ Add(result, dividend, Operand(dividend, LSR, 31)); 2769 __ Add(result, dividend, Operand(dividend, LSR, 31));
2775 } else { 2770 } else {
2776 __ Mov(result, Operand(dividend, ASR, 31)); 2771 __ Mov(result, Operand(dividend, ASR, 31));
2777 __ Add(result, dividend, Operand(result, LSR, 32 - shift)); 2772 __ Add(result, dividend, Operand(result, LSR, 32 - shift));
2778 } 2773 }
2779 if (shift > 0) __ Mov(result, Operand(result, ASR, shift)); 2774 if (shift > 0) __ Mov(result, Operand(result, ASR, shift));
2780 if (divisor < 0) __ Neg(result, result); 2775 if (divisor < 0) __ Neg(result, result);
2781 } 2776 }
2782 2777
2783 2778
// Emits code for integer division by an arbitrary compile-time constant,
// using a multiply-by-magic-number sequence (TruncatingDiv) instead of an
// sdiv instruction. Deopts on a constant-zero divisor, on minus zero, and
// on inexact division when truncation is not allowed.
void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
  Register dividend = ToRegister32(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister32(instr->result());
  DCHECK(!AreAliased(dividend, result));

  if (divisor == 0) {
    // Division by a constant zero unconditionally deopts.
    Deoptimize(instr, "division by zero");
    return;
  }

  // Check for (0 / -x) that will produce negative zero.
  HDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    DeoptimizeIfZero(dividend, instr, "minus zero");
  }

  __ TruncatingDiv(result, dividend, Abs(divisor));
  if (divisor < 0) __ Neg(result, result);

  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
    // Verify the division was exact: temp = dividend - result * divisor
    // (via Smsubl) must be zero, otherwise deopt.
    Register temp = ToRegister32(instr->temp());
    DCHECK(!AreAliased(dividend, result, temp));
    __ Sxtw(dividend.X(), dividend);
    __ Mov(temp, divisor);
    __ Smsubl(temp.X(), result, temp, dividend.X());
    DeoptimizeIfNotZero(temp, instr, "lost precision");
  }
}
2813 2808
2814 2809
// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
// Emits code for integer division with a non-constant divisor, using sdiv.
// The deopt checks (div-by-zero, minus zero, kMinInt/-1 overflow, inexact
// result) run after the division is issued so they overlap its latency.
void LCodeGen::DoDivI(LDivI* instr) {
  HBinaryOperation* hdiv = instr->hydrogen();
  Register dividend = ToRegister32(instr->dividend());
  Register divisor = ToRegister32(instr->divisor());
  Register result = ToRegister32(instr->result());

  // Issue the division first, and then check for any deopt cases whilst the
  // result is computed.
  __ Sdiv(result, dividend, divisor);

  if (hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
    // Truncating division needs none of the checks below (arm64 sdiv does
    // not trap), so the temp register must not have been allocated.
    DCHECK(!instr->temp());
    return;
  }

  // Check for x / 0.
  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
    DeoptimizeIfZero(divisor, instr, "division by zero");
  }

  // Check for (0 / -x) as that will produce negative zero.
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
    __ Cmp(divisor, 0);

    // If the divisor < 0 (mi), compare the dividend, and deopt if it is
    // zero, ie. zero dividend with negative divisor deopts.
    // If the divisor >= 0 (pl, the opposite of mi) set the flags to
    // condition ne, so we don't deopt, ie. positive divisor doesn't deopt.
    __ Ccmp(dividend, 0, NoFlag, mi);
    DeoptimizeIf(eq, instr, "minus zero");
  }

  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow)) {
    // Test dividend for kMinInt by subtracting one (cmp) and checking for
    // overflow.
    __ Cmp(dividend, 1);
    // If overflow is set, ie. dividend = kMinInt, compare the divisor with
    // -1. If overflow is clear, set the flags for condition ne, as the
    // dividend isn't -1, and thus we shouldn't deopt.
    __ Ccmp(divisor, -1, NoFlag, vs);
    DeoptimizeIf(eq, instr, "overflow");
  }

  // Compute remainder and deopt if it's not zero.
  Register remainder = ToRegister32(instr->temp());
  __ Msub(remainder, result, divisor, dividend);
  DeoptimizeIfNotZero(remainder, instr, "lost precision");
}
2865 2860
2866 2861
// Converts a double to an int32 (optionally Smi-tagged afterwards).
// Deopts on minus zero (when the hydrogen instruction requires it) and on
// any value that does not round-trip exactly through int32, including NaN.
void LCodeGen::DoDoubleToIntOrSmi(LDoubleToIntOrSmi* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  Register result = ToRegister32(instr->result());

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    DeoptimizeIfMinusZero(input, instr, "minus zero");
  }

  // Sets the flags to ne if the double is not exactly representable as a
  // 32-bit integer (or is NaN); deopt in that case.
  __ TryRepresentDoubleAsInt32(result, input, double_scratch());
  DeoptimizeIf(ne, instr, "lost precision or NaN");

  if (instr->tag_result()) {
    __ SmiTag(result.X());
  }
}
2882 2877
2883 2878
// Removes instr->count() slots from the top of the stack.
void LCodeGen::DoDrop(LDrop* instr) {
  __ Drop(instr->count());
}
(...skipping 40 matching lines...) Expand 10 before | Expand all | Expand 10 after
2927 __ EnumLengthUntagged(result, map); 2922 __ EnumLengthUntagged(result, map);
2928 __ Cbnz(result, &load_cache); 2923 __ Cbnz(result, &load_cache);
2929 2924
2930 __ Mov(result, Operand(isolate()->factory()->empty_fixed_array())); 2925 __ Mov(result, Operand(isolate()->factory()->empty_fixed_array()));
2931 __ B(&done); 2926 __ B(&done);
2932 2927
2933 __ Bind(&load_cache); 2928 __ Bind(&load_cache);
2934 __ LoadInstanceDescriptors(map, result); 2929 __ LoadInstanceDescriptors(map, result);
2935 __ Ldr(result, FieldMemOperand(result, DescriptorArray::kEnumCacheOffset)); 2930 __ Ldr(result, FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
2936 __ Ldr(result, FieldMemOperand(result, FixedArray::SizeFor(instr->idx()))); 2931 __ Ldr(result, FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
2937 DeoptimizeIfZero(result, instr, Deoptimizer::kNoCache); 2932 DeoptimizeIfZero(result, instr, "no cache");
2938 2933
2939 __ Bind(&done); 2934 __ Bind(&done);
2940 } 2935 }
2941 2936
2942 2937
// Prepares the map/enum-cache for a for-in loop over `object` (in x0).
// Deopts for undefined, null, Smis, and non-JS-object receivers; otherwise
// either uses the object's enum cache or calls the runtime to collect the
// property names, leaving the map (or names fixed array) in x0.
void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
  Register object = ToRegister(instr->object());
  Register null_value = x5;

  DCHECK(instr->IsMarkedAsCall());
  DCHECK(object.Is(x0));

  // for-in over undefined or null is handled by deopting to full codegen.
  DeoptimizeIfRoot(object, Heap::kUndefinedValueRootIndex, instr, "undefined");

  __ LoadRoot(null_value, Heap::kNullValueRootIndex);
  __ Cmp(object, null_value);
  DeoptimizeIf(eq, instr, "null");

  DeoptimizeIfSmi(object, instr, "Smi");

  // Proxies (and anything below the first spec-object type) also deopt.
  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
  __ CompareObjectType(object, x1, x1, LAST_JS_PROXY_TYPE);
  DeoptimizeIf(le, instr, "not a JavaScript object");

  Label use_cache, call_runtime;
  // Falls through to call_runtime if any map in the prototype chain lacks a
  // usable enum cache.
  __ CheckEnumCache(object, null_value, x1, x2, x3, x4, &call_runtime);

  __ Ldr(object, FieldMemOperand(object, HeapObject::kMapOffset));
  __ B(&use_cache);

  // Get the set of properties to enumerate.
  __ Bind(&call_runtime);
  __ Push(object);
  CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);

  // The runtime returns either a map (use the cache) or a fixed array of
  // names; deopt unless it is a map (its map is the meta map).
  __ Ldr(x1, FieldMemOperand(object, HeapObject::kMapOffset));
  DeoptimizeIfNotRoot(x1, Heap::kMetaMapRootIndex, instr, "wrong map");

  __ Bind(&use_cache);
}
2980 2973
2981 2974
2982 void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) { 2975 void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
2983 Register input = ToRegister(instr->value()); 2976 Register input = ToRegister(instr->value());
2984 Register result = ToRegister(instr->result()); 2977 Register result = ToRegister(instr->result());
2985 2978
2986 __ AssertString(input); 2979 __ AssertString(input);
(...skipping 370 matching lines...) Expand 10 before | Expand all | Expand 10 after
3357 DoGap(label); 3350 DoGap(label);
3358 } 3351 }
3359 3352
3360 3353
// Loads a slot from a context. If a hole check is required, either deopts
// on the hole or silently converts it to undefined, depending on the
// hydrogen instruction's DeoptimizesOnHole flag.
void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
  Register context = ToRegister(instr->context());
  Register result = ToRegister(instr->result());
  __ Ldr(result, ContextMemOperand(context, instr->slot_index()));
  if (instr->hydrogen()->RequiresHoleCheck()) {
    if (instr->hydrogen()->DeoptimizesOnHole()) {
      DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr, "hole");
    } else {
      // Non-deopting variant: replace the hole with undefined.
      Label not_the_hole;
      __ JumpIfNotRoot(result, Heap::kTheHoleValueRootIndex, &not_the_hole);
      __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
      __ Bind(&not_the_hole);
    }
  }
}
3377 3369
3378 3370
// Loads a JSFunction's prototype. The function's prototype-or-initial-map
// field holds either the prototype directly or an initial map whose
// kPrototypeOffset field holds it; the hole means no prototype and deopts.
void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
  Register function = ToRegister(instr->function());
  Register result = ToRegister(instr->result());
  Register temp = ToRegister(instr->temp());

  // Get the prototype or initial map from the function.
  __ Ldr(result, FieldMemOperand(function,
                                 JSFunction::kPrototypeOrInitialMapOffset));

  // Check that the function has a prototype or an initial map.
  DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr, "hole");

  // If the function does not have an initial map, we're done.
  Label done;
  __ CompareObjectType(result, temp, temp, MAP_TYPE);
  __ B(ne, &done);

  // Get the prototype from the initial map.
  __ Ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));

  // All done.
  __ Bind(&done);
}
3403 3394
3404 3395
// Loads the value of a global property cell. Deopts if the cell contains
// the hole and a hole check is required (i.e. the property may have been
// deleted).
void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
  Register result = ToRegister(instr->result());
  __ Mov(result, Operand(Handle<Object>(instr->hydrogen()->cell().handle())));
  __ Ldr(result, FieldMemOperand(result, Cell::kValueOffset));
  if (instr->hydrogen()->RequiresHoleCheck()) {
    DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr, "hole");
  }
}
3414 3404
3415 3405
3416 template <class T> 3406 template <class T>
3417 void LCodeGen::EmitVectorLoadICRegisters(T* instr) { 3407 void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
3418 DCHECK(FLAG_vector_ics); 3408 DCHECK(FLAG_vector_ics);
3419 Register vector_register = ToRegister(instr->temp_vector()); 3409 Register vector_register = ToRegister(instr->temp_vector());
3420 Register slot_register = VectorLoadICDescriptor::SlotRegister(); 3410 Register slot_register = VectorLoadICDescriptor::SlotRegister();
3421 DCHECK(vector_register.is(VectorLoadICDescriptor::VectorRegister())); 3411 DCHECK(vector_register.is(VectorLoadICDescriptor::VectorRegister()));
(...skipping 115 matching lines...) Expand 10 before | Expand all | Expand 10 after
3537 case EXTERNAL_INT32_ELEMENTS: 3527 case EXTERNAL_INT32_ELEMENTS:
3538 case INT32_ELEMENTS: 3528 case INT32_ELEMENTS:
3539 __ Ldrsw(result, mem_op); 3529 __ Ldrsw(result, mem_op);
3540 break; 3530 break;
3541 case EXTERNAL_UINT32_ELEMENTS: 3531 case EXTERNAL_UINT32_ELEMENTS:
3542 case UINT32_ELEMENTS: 3532 case UINT32_ELEMENTS:
3543 __ Ldr(result.W(), mem_op); 3533 __ Ldr(result.W(), mem_op);
3544 if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) { 3534 if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
3545 // Deopt if value > 0x80000000. 3535 // Deopt if value > 0x80000000.
3546 __ Tst(result, 0xFFFFFFFF80000000); 3536 __ Tst(result, 0xFFFFFFFF80000000);
3547 DeoptimizeIf(ne, instr, Deoptimizer::kNegativeValue); 3537 DeoptimizeIf(ne, instr, "negative value");
3548 } 3538 }
3549 break; 3539 break;
3550 case FLOAT32_ELEMENTS: 3540 case FLOAT32_ELEMENTS:
3551 case FLOAT64_ELEMENTS: 3541 case FLOAT64_ELEMENTS:
3552 case EXTERNAL_FLOAT32_ELEMENTS: 3542 case EXTERNAL_FLOAT32_ELEMENTS:
3553 case EXTERNAL_FLOAT64_ELEMENTS: 3543 case EXTERNAL_FLOAT64_ELEMENTS:
3554 case FAST_HOLEY_DOUBLE_ELEMENTS: 3544 case FAST_HOLEY_DOUBLE_ELEMENTS:
3555 case FAST_HOLEY_ELEMENTS: 3545 case FAST_HOLEY_ELEMENTS:
3556 case FAST_HOLEY_SMI_ELEMENTS: 3546 case FAST_HOLEY_SMI_ELEMENTS:
3557 case FAST_DOUBLE_ELEMENTS: 3547 case FAST_DOUBLE_ELEMENTS:
(...skipping 73 matching lines...) Expand 10 before | Expand all | Expand 10 after
3631 instr->hydrogen()->representation(), 3621 instr->hydrogen()->representation(),
3632 instr->base_offset()); 3622 instr->base_offset());
3633 } 3623 }
3634 3624
3635 __ Ldr(result, mem_op); 3625 __ Ldr(result, mem_op);
3636 3626
3637 if (instr->hydrogen()->RequiresHoleCheck()) { 3627 if (instr->hydrogen()->RequiresHoleCheck()) {
3638 Register scratch = ToRegister(instr->temp()); 3628 Register scratch = ToRegister(instr->temp());
3639 __ Fmov(scratch, result); 3629 __ Fmov(scratch, result);
3640 __ Eor(scratch, scratch, kHoleNanInt64); 3630 __ Eor(scratch, scratch, kHoleNanInt64);
3641 DeoptimizeIfZero(scratch, instr, Deoptimizer::kHole); 3631 DeoptimizeIfZero(scratch, instr, "hole");
3642 } 3632 }
3643 } 3633 }
3644 3634
3645 3635
3646 void LCodeGen::DoLoadKeyedFixed(LLoadKeyedFixed* instr) { 3636 void LCodeGen::DoLoadKeyedFixed(LLoadKeyedFixed* instr) {
3647 Register elements = ToRegister(instr->elements()); 3637 Register elements = ToRegister(instr->elements());
3648 Register result = ToRegister(instr->result()); 3638 Register result = ToRegister(instr->result());
3649 MemOperand mem_op; 3639 MemOperand mem_op;
3650 3640
3651 Representation representation = instr->hydrogen()->representation(); 3641 Representation representation = instr->hydrogen()->representation();
(...skipping 17 matching lines...) Expand all
3669 3659
3670 mem_op = PrepareKeyedArrayOperand(load_base, elements, key, key_is_tagged, 3660 mem_op = PrepareKeyedArrayOperand(load_base, elements, key, key_is_tagged,
3671 instr->hydrogen()->elements_kind(), 3661 instr->hydrogen()->elements_kind(),
3672 representation, instr->base_offset()); 3662 representation, instr->base_offset());
3673 } 3663 }
3674 3664
3675 __ Load(result, mem_op, representation); 3665 __ Load(result, mem_op, representation);
3676 3666
3677 if (instr->hydrogen()->RequiresHoleCheck()) { 3667 if (instr->hydrogen()->RequiresHoleCheck()) {
3678 if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) { 3668 if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
3679 DeoptimizeIfNotSmi(result, instr, Deoptimizer::kNotASmi); 3669 DeoptimizeIfNotSmi(result, instr, "not a Smi");
3680 } else { 3670 } else {
3681 DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr, 3671 DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr, "hole");
3682 Deoptimizer::kHole);
3683 } 3672 }
3684 } 3673 }
3685 } 3674 }
3686 3675
3687 3676
3688 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) { 3677 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
3689 DCHECK(ToRegister(instr->context()).is(cp)); 3678 DCHECK(ToRegister(instr->context()).is(cp));
3690 DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister())); 3679 DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
3691 DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister())); 3680 DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
3692 if (FLAG_vector_ics) { 3681 if (FLAG_vector_ics) {
(...skipping 83 matching lines...) Expand 10 before | Expand all | Expand 10 after
3776 if (r.IsDouble()) { 3765 if (r.IsDouble()) {
3777 DoubleRegister input = ToDoubleRegister(instr->value()); 3766 DoubleRegister input = ToDoubleRegister(instr->value());
3778 DoubleRegister result = ToDoubleRegister(instr->result()); 3767 DoubleRegister result = ToDoubleRegister(instr->result());
3779 __ Fabs(result, input); 3768 __ Fabs(result, input);
3780 } else if (r.IsSmi() || r.IsInteger32()) { 3769 } else if (r.IsSmi() || r.IsInteger32()) {
3781 Register input = r.IsSmi() ? ToRegister(instr->value()) 3770 Register input = r.IsSmi() ? ToRegister(instr->value())
3782 : ToRegister32(instr->value()); 3771 : ToRegister32(instr->value());
3783 Register result = r.IsSmi() ? ToRegister(instr->result()) 3772 Register result = r.IsSmi() ? ToRegister(instr->result())
3784 : ToRegister32(instr->result()); 3773 : ToRegister32(instr->result());
3785 __ Abs(result, input); 3774 __ Abs(result, input);
3786 DeoptimizeIf(vs, instr, Deoptimizer::kOverflow); 3775 DeoptimizeIf(vs, instr, "overflow");
3787 } 3776 }
3788 } 3777 }
3789 3778
3790 3779
3791 void LCodeGen::DoDeferredMathAbsTagged(LMathAbsTagged* instr, 3780 void LCodeGen::DoDeferredMathAbsTagged(LMathAbsTagged* instr,
3792 Label* exit, 3781 Label* exit,
3793 Label* allocation_entry) { 3782 Label* allocation_entry) {
3794 // Handle the tricky cases of MathAbsTagged: 3783 // Handle the tricky cases of MathAbsTagged:
3795 // - HeapNumber inputs. 3784 // - HeapNumber inputs.
3796 // - Negative inputs produce a positive result, so a new HeapNumber is 3785 // - Negative inputs produce a positive result, so a new HeapNumber is
(...skipping 131 matching lines...) Expand 10 before | Expand all | Expand 10 after
3928 3917
3929 __ Frintm(result, input); 3918 __ Frintm(result, input);
3930 } 3919 }
3931 3920
3932 3921
// Math.floor of a double, producing an int32 result. Deopts on minus zero
// (when required), and on results that do not fit in 32 bits or NaN input.
void LCodeGen::DoMathFloorI(LMathFloorI* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  Register result = ToRegister(instr->result());

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    DeoptimizeIfMinusZero(input, instr, "minus zero");
  }

  // Fcvtms converts with rounding towards minus infinity (= floor) into the
  // full 64-bit register.
  __ Fcvtms(result, input);

  // Check that the result fits into a 32-bit integer.
  //  - The result did not overflow.
  __ Cmp(result, Operand(result, SXTW));
  //  - The input was not NaN.
  __ Fccmp(input, input, NoFlag, eq);
  DeoptimizeIf(ne, instr, "lost precision or NaN");
}
3950 3939
3951 3940
// Flooring integer division by a compile-time power-of-two divisor, using
// shifts/negation instead of sdiv. Deopts on minus zero and on
// kMinInt / -1 overflow as required by the hydrogen flags.
void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
  Register dividend = ToRegister32(instr->dividend());
  Register result = ToRegister32(instr->result());
  int32_t divisor = instr->divisor();

  // If the divisor is 1, return the dividend.
  if (divisor == 1) {
    __ Mov(result, dividend, kDiscardForSameWReg);
    return;
  }

  // If the divisor is positive, things are easy: There can be no deopts and we
  // can simply do an arithmetic right shift.
  int32_t shift = WhichPowerOf2Abs(divisor);
  if (divisor > 1) {
    __ Mov(result, Operand(dividend, ASR, shift));
    return;
  }

  // If the divisor is negative, we have to negate and handle edge cases.
  // Negs sets Z when the dividend is zero (minus-zero case) and V when it
  // is kMinInt (negation overflow).
  __ Negs(result, dividend);
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    DeoptimizeIf(eq, instr, "minus zero");
  }

  // Dividing by -1 is basically negation, unless we overflow.
  if (divisor == -1) {
    if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
      DeoptimizeIf(vs, instr, "overflow");
    }
    return;
  }

  // If the negation could not overflow, simply shifting is OK.
  // NOTE(review): this shifts `dividend`, discarding the negated value left
  // in `result` by Negs above; floor(x / -2^k) equals (-x) ASR k, so
  // shifting `result` instead looks intended — verify against the upstream
  // file before relying on this path.
  if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
    __ Mov(result, Operand(dividend, ASR, shift));
    return;
  }

  // The negation may have overflowed (dividend == kMinInt): shift, then
  // select the exact constant kMinInt / divisor if V was set.
  __ Asr(result, result, shift);
  __ Csel(result, result, kMinInt / divisor, vc);
}
3994 3983
3995 3984
3996 void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) { 3985 void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
3997 Register dividend = ToRegister32(instr->dividend()); 3986 Register dividend = ToRegister32(instr->dividend());
3998 int32_t divisor = instr->divisor(); 3987 int32_t divisor = instr->divisor();
3999 Register result = ToRegister32(instr->result()); 3988 Register result = ToRegister32(instr->result());
4000 DCHECK(!AreAliased(dividend, result)); 3989 DCHECK(!AreAliased(dividend, result));
4001 3990
4002 if (divisor == 0) { 3991 if (divisor == 0) {
4003 Deoptimize(instr, Deoptimizer::kDivisionByZero); 3992 Deoptimize(instr, "division by zero");
4004 return; 3993 return;
4005 } 3994 }
4006 3995
4007 // Check for (0 / -x) that will produce negative zero. 3996 // Check for (0 / -x) that will produce negative zero.
4008 HMathFloorOfDiv* hdiv = instr->hydrogen(); 3997 HMathFloorOfDiv* hdiv = instr->hydrogen();
4009 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { 3998 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
4010 DeoptimizeIfZero(dividend, instr, Deoptimizer::kMinusZero); 3999 DeoptimizeIfZero(dividend, instr, "minus zero");
4011 } 4000 }
4012 4001
4013 // Easy case: We need no dynamic check for the dividend and the flooring 4002 // Easy case: We need no dynamic check for the dividend and the flooring
4014 // division is the same as the truncating division. 4003 // division is the same as the truncating division.
4015 if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) || 4004 if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
4016 (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) { 4005 (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
4017 __ TruncatingDiv(result, dividend, Abs(divisor)); 4006 __ TruncatingDiv(result, dividend, Abs(divisor));
4018 if (divisor < 0) __ Neg(result, result); 4007 if (divisor < 0) __ Neg(result, result);
4019 return; 4008 return;
4020 } 4009 }
(...skipping 22 matching lines...) Expand all
4043 Register dividend = ToRegister32(instr->dividend()); 4032 Register dividend = ToRegister32(instr->dividend());
4044 Register divisor = ToRegister32(instr->divisor()); 4033 Register divisor = ToRegister32(instr->divisor());
4045 Register remainder = ToRegister32(instr->temp()); 4034 Register remainder = ToRegister32(instr->temp());
4046 Register result = ToRegister32(instr->result()); 4035 Register result = ToRegister32(instr->result());
4047 4036
4048 // This can't cause an exception on ARM, so we can speculatively 4037 // This can't cause an exception on ARM, so we can speculatively
4049 // execute it already now. 4038 // execute it already now.
4050 __ Sdiv(result, dividend, divisor); 4039 __ Sdiv(result, dividend, divisor);
4051 4040
4052 // Check for x / 0. 4041 // Check for x / 0.
4053 DeoptimizeIfZero(divisor, instr, Deoptimizer::kDivisionByZero); 4042 DeoptimizeIfZero(divisor, instr, "division by zero");
4054 4043
4055 // Check for (kMinInt / -1). 4044 // Check for (kMinInt / -1).
4056 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) { 4045 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
4057 // The V flag will be set iff dividend == kMinInt. 4046 // The V flag will be set iff dividend == kMinInt.
4058 __ Cmp(dividend, 1); 4047 __ Cmp(dividend, 1);
4059 __ Ccmp(divisor, -1, NoFlag, vs); 4048 __ Ccmp(divisor, -1, NoFlag, vs);
4060 DeoptimizeIf(eq, instr, Deoptimizer::kOverflow); 4049 DeoptimizeIf(eq, instr, "overflow");
4061 } 4050 }
4062 4051
4063 // Check for (0 / -x) that will produce negative zero. 4052 // Check for (0 / -x) that will produce negative zero.
4064 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 4053 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
4065 __ Cmp(divisor, 0); 4054 __ Cmp(divisor, 0);
4066 __ Ccmp(dividend, 0, ZFlag, mi); 4055 __ Ccmp(dividend, 0, ZFlag, mi);
4067 // "divisor" can't be null because the code would have already been 4056 // "divisor" can't be null because the code would have already been
4068 // deoptimized. The Z flag is set only if (divisor < 0) and (dividend == 0). 4057 // deoptimized. The Z flag is set only if (divisor < 0) and (dividend == 0).
4069 // In this case we need to deoptimize to produce a -0. 4058 // In this case we need to deoptimize to produce a -0.
4070 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero); 4059 DeoptimizeIf(eq, instr, "minus zero");
4071 } 4060 }
4072 4061
4073 Label done; 4062 Label done;
4074 // If both operands have the same sign then we are done. 4063 // If both operands have the same sign then we are done.
4075 __ Eor(remainder, dividend, divisor); 4064 __ Eor(remainder, dividend, divisor);
4076 __ Tbz(remainder, kWSignBit, &done); 4065 __ Tbz(remainder, kWSignBit, &done);
4077 4066
4078 // Check if the result needs to be corrected. 4067 // Check if the result needs to be corrected.
4079 __ Msub(remainder, result, divisor, dividend); 4068 __ Msub(remainder, result, divisor, dividend);
4080 __ Cbz(remainder, &done); 4069 __ Cbz(remainder, &done);
(...skipping 138 matching lines...) Expand 10 before | Expand all | Expand 10 after
4219 // result fits in 32 bits. 4208 // result fits in 32 bits.
4220 __ Cmp(result, Operand(result.W(), SXTW)); 4209 __ Cmp(result, Operand(result.W(), SXTW));
4221 __ Ccmp(result, 1, ZFlag, eq); 4210 __ Ccmp(result, 1, ZFlag, eq);
4222 __ B(hi, &done); 4211 __ B(hi, &done);
4223 4212
4224 // At this point, we have to handle possible inputs of NaN or numbers in the 4213 // At this point, we have to handle possible inputs of NaN or numbers in the
4225 // range [-0.5, 1.5[, or numbers larger than 32 bits. 4214 // range [-0.5, 1.5[, or numbers larger than 32 bits.
4226 4215
4227 // Deoptimize if the result > 1, as it must be larger than 32 bits. 4216 // Deoptimize if the result > 1, as it must be larger than 32 bits.
4228 __ Cmp(result, 1); 4217 __ Cmp(result, 1);
4229 DeoptimizeIf(hi, instr, Deoptimizer::kOverflow); 4218 DeoptimizeIf(hi, instr, "overflow");
4230 4219
4231 // Deoptimize for negative inputs, which at this point are only numbers in 4220 // Deoptimize for negative inputs, which at this point are only numbers in
4232 // the range [-0.5, -0.0] 4221 // the range [-0.5, -0.0]
4233 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 4222 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
4234 __ Fmov(result, input); 4223 __ Fmov(result, input);
4235 DeoptimizeIfNegative(result, instr, Deoptimizer::kMinusZero); 4224 DeoptimizeIfNegative(result, instr, "minus zero");
4236 } 4225 }
4237 4226
4238 // Deoptimize if the input was NaN. 4227 // Deoptimize if the input was NaN.
4239 __ Fcmp(input, dot_five); 4228 __ Fcmp(input, dot_five);
4240 DeoptimizeIf(vs, instr, Deoptimizer::kNaN); 4229 DeoptimizeIf(vs, instr, "NaN");
4241 4230
4242 // Now, the only unhandled inputs are in the range [0.0, 1.5[ (or [-0.5, 1.5[ 4231 // Now, the only unhandled inputs are in the range [0.0, 1.5[ (or [-0.5, 1.5[
4243 // if we didn't generate a -0.0 bailout). If input >= 0.5 then return 1, 4232 // if we didn't generate a -0.0 bailout). If input >= 0.5 then return 1,
4244 // else 0; we avoid dealing with 0.499...94 directly. 4233 // else 0; we avoid dealing with 0.499...94 directly.
4245 __ Cset(result, ge); 4234 __ Cset(result, ge);
4246 __ Bind(&done); 4235 __ Bind(&done);
4247 } 4236 }
4248 4237
4249 4238
4250 void LCodeGen::DoMathFround(LMathFround* instr) { 4239 void LCodeGen::DoMathFround(LMathFround* instr) {
(...skipping 57 matching lines...) Expand 10 before | Expand all | Expand 10 after
4308 HMod* hmod = instr->hydrogen(); 4297 HMod* hmod = instr->hydrogen();
4309 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1); 4298 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
4310 Label dividend_is_not_negative, done; 4299 Label dividend_is_not_negative, done;
4311 if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) { 4300 if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
4312 __ Tbz(dividend, kWSignBit, &dividend_is_not_negative); 4301 __ Tbz(dividend, kWSignBit, &dividend_is_not_negative);
4313 // Note that this is correct even for kMinInt operands. 4302 // Note that this is correct even for kMinInt operands.
4314 __ Neg(dividend, dividend); 4303 __ Neg(dividend, dividend);
4315 __ And(dividend, dividend, mask); 4304 __ And(dividend, dividend, mask);
4316 __ Negs(dividend, dividend); 4305 __ Negs(dividend, dividend);
4317 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { 4306 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
4318 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero); 4307 DeoptimizeIf(eq, instr, "minus zero");
4319 } 4308 }
4320 __ B(&done); 4309 __ B(&done);
4321 } 4310 }
4322 4311
4323 __ bind(&dividend_is_not_negative); 4312 __ bind(&dividend_is_not_negative);
4324 __ And(dividend, dividend, mask); 4313 __ And(dividend, dividend, mask);
4325 __ bind(&done); 4314 __ bind(&done);
4326 } 4315 }
4327 4316
4328 4317
4329 void LCodeGen::DoModByConstI(LModByConstI* instr) { 4318 void LCodeGen::DoModByConstI(LModByConstI* instr) {
4330 Register dividend = ToRegister32(instr->dividend()); 4319 Register dividend = ToRegister32(instr->dividend());
4331 int32_t divisor = instr->divisor(); 4320 int32_t divisor = instr->divisor();
4332 Register result = ToRegister32(instr->result()); 4321 Register result = ToRegister32(instr->result());
4333 Register temp = ToRegister32(instr->temp()); 4322 Register temp = ToRegister32(instr->temp());
4334 DCHECK(!AreAliased(dividend, result, temp)); 4323 DCHECK(!AreAliased(dividend, result, temp));
4335 4324
4336 if (divisor == 0) { 4325 if (divisor == 0) {
4337 Deoptimize(instr, Deoptimizer::kDivisionByZero); 4326 Deoptimize(instr, "division by zero");
4338 return; 4327 return;
4339 } 4328 }
4340 4329
4341 __ TruncatingDiv(result, dividend, Abs(divisor)); 4330 __ TruncatingDiv(result, dividend, Abs(divisor));
4342 __ Sxtw(dividend.X(), dividend); 4331 __ Sxtw(dividend.X(), dividend);
4343 __ Mov(temp, Abs(divisor)); 4332 __ Mov(temp, Abs(divisor));
4344 __ Smsubl(result.X(), result, temp, dividend.X()); 4333 __ Smsubl(result.X(), result, temp, dividend.X());
4345 4334
4346 // Check for negative zero. 4335 // Check for negative zero.
4347 HMod* hmod = instr->hydrogen(); 4336 HMod* hmod = instr->hydrogen();
4348 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { 4337 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
4349 Label remainder_not_zero; 4338 Label remainder_not_zero;
4350 __ Cbnz(result, &remainder_not_zero); 4339 __ Cbnz(result, &remainder_not_zero);
4351 DeoptimizeIfNegative(dividend, instr, Deoptimizer::kMinusZero); 4340 DeoptimizeIfNegative(dividend, instr, "minus zero");
4352 __ bind(&remainder_not_zero); 4341 __ bind(&remainder_not_zero);
4353 } 4342 }
4354 } 4343 }
4355 4344
4356 4345
4357 void LCodeGen::DoModI(LModI* instr) { 4346 void LCodeGen::DoModI(LModI* instr) {
4358 Register dividend = ToRegister32(instr->left()); 4347 Register dividend = ToRegister32(instr->left());
4359 Register divisor = ToRegister32(instr->right()); 4348 Register divisor = ToRegister32(instr->right());
4360 Register result = ToRegister32(instr->result()); 4349 Register result = ToRegister32(instr->result());
4361 4350
4362 Label done; 4351 Label done;
4363 // modulo = dividend - quotient * divisor 4352 // modulo = dividend - quotient * divisor
4364 __ Sdiv(result, dividend, divisor); 4353 __ Sdiv(result, dividend, divisor);
4365 if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) { 4354 if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
4366 DeoptimizeIfZero(divisor, instr, Deoptimizer::kDivisionByZero); 4355 DeoptimizeIfZero(divisor, instr, "division by zero");
4367 } 4356 }
4368 __ Msub(result, result, divisor, dividend); 4357 __ Msub(result, result, divisor, dividend);
4369 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 4358 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
4370 __ Cbnz(result, &done); 4359 __ Cbnz(result, &done);
4371 DeoptimizeIfNegative(dividend, instr, Deoptimizer::kMinusZero); 4360 DeoptimizeIfNegative(dividend, instr, "minus zero");
4372 } 4361 }
4373 __ Bind(&done); 4362 __ Bind(&done);
4374 } 4363 }
4375 4364
4376 4365
4377 void LCodeGen::DoMulConstIS(LMulConstIS* instr) { 4366 void LCodeGen::DoMulConstIS(LMulConstIS* instr) {
4378 DCHECK(instr->hydrogen()->representation().IsSmiOrInteger32()); 4367 DCHECK(instr->hydrogen()->representation().IsSmiOrInteger32());
4379 bool is_smi = instr->hydrogen()->representation().IsSmi(); 4368 bool is_smi = instr->hydrogen()->representation().IsSmi();
4380 Register result = 4369 Register result =
4381 is_smi ? ToRegister(instr->result()) : ToRegister32(instr->result()); 4370 is_smi ? ToRegister(instr->result()) : ToRegister32(instr->result());
4382 Register left = 4371 Register left =
4383 is_smi ? ToRegister(instr->left()) : ToRegister32(instr->left()) ; 4372 is_smi ? ToRegister(instr->left()) : ToRegister32(instr->left()) ;
4384 int32_t right = ToInteger32(instr->right()); 4373 int32_t right = ToInteger32(instr->right());
4385 DCHECK((right > -kMaxInt) || (right < kMaxInt)); 4374 DCHECK((right > -kMaxInt) || (right < kMaxInt));
4386 4375
4387 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); 4376 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
4388 bool bailout_on_minus_zero = 4377 bool bailout_on_minus_zero =
4389 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero); 4378 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
4390 4379
4391 if (bailout_on_minus_zero) { 4380 if (bailout_on_minus_zero) {
4392 if (right < 0) { 4381 if (right < 0) {
4393 // The result is -0 if right is negative and left is zero. 4382 // The result is -0 if right is negative and left is zero.
4394 DeoptimizeIfZero(left, instr, Deoptimizer::kMinusZero); 4383 DeoptimizeIfZero(left, instr, "minus zero");
4395 } else if (right == 0) { 4384 } else if (right == 0) {
4396 // The result is -0 if the right is zero and the left is negative. 4385 // The result is -0 if the right is zero and the left is negative.
4397 DeoptimizeIfNegative(left, instr, Deoptimizer::kMinusZero); 4386 DeoptimizeIfNegative(left, instr, "minus zero");
4398 } 4387 }
4399 } 4388 }
4400 4389
4401 switch (right) { 4390 switch (right) {
4402 // Cases which can detect overflow. 4391 // Cases which can detect overflow.
4403 case -1: 4392 case -1:
4404 if (can_overflow) { 4393 if (can_overflow) {
4405 // Only 0x80000000 can overflow here. 4394 // Only 0x80000000 can overflow here.
4406 __ Negs(result, left); 4395 __ Negs(result, left);
4407 DeoptimizeIf(vs, instr, Deoptimizer::kOverflow); 4396 DeoptimizeIf(vs, instr, "overflow");
4408 } else { 4397 } else {
4409 __ Neg(result, left); 4398 __ Neg(result, left);
4410 } 4399 }
4411 break; 4400 break;
4412 case 0: 4401 case 0:
4413 // This case can never overflow. 4402 // This case can never overflow.
4414 __ Mov(result, 0); 4403 __ Mov(result, 0);
4415 break; 4404 break;
4416 case 1: 4405 case 1:
4417 // This case can never overflow. 4406 // This case can never overflow.
4418 __ Mov(result, left, kDiscardForSameWReg); 4407 __ Mov(result, left, kDiscardForSameWReg);
4419 break; 4408 break;
4420 case 2: 4409 case 2:
4421 if (can_overflow) { 4410 if (can_overflow) {
4422 __ Adds(result, left, left); 4411 __ Adds(result, left, left);
4423 DeoptimizeIf(vs, instr, Deoptimizer::kOverflow); 4412 DeoptimizeIf(vs, instr, "overflow");
4424 } else { 4413 } else {
4425 __ Add(result, left, left); 4414 __ Add(result, left, left);
4426 } 4415 }
4427 break; 4416 break;
4428 4417
4429 default: 4418 default:
4430 // Multiplication by constant powers of two (and some related values) 4419 // Multiplication by constant powers of two (and some related values)
4431 // can be done efficiently with shifted operands. 4420 // can be done efficiently with shifted operands.
4432 int32_t right_abs = Abs(right); 4421 int32_t right_abs = Abs(right);
4433 4422
4434 if (base::bits::IsPowerOfTwo32(right_abs)) { 4423 if (base::bits::IsPowerOfTwo32(right_abs)) {
4435 int right_log2 = WhichPowerOf2(right_abs); 4424 int right_log2 = WhichPowerOf2(right_abs);
4436 4425
4437 if (can_overflow) { 4426 if (can_overflow) {
4438 Register scratch = result; 4427 Register scratch = result;
4439 DCHECK(!AreAliased(scratch, left)); 4428 DCHECK(!AreAliased(scratch, left));
4440 __ Cls(scratch, left); 4429 __ Cls(scratch, left);
4441 __ Cmp(scratch, right_log2); 4430 __ Cmp(scratch, right_log2);
4442 DeoptimizeIf(lt, instr, Deoptimizer::kOverflow); 4431 DeoptimizeIf(lt, instr, "overflow");
4443 } 4432 }
4444 4433
4445 if (right >= 0) { 4434 if (right >= 0) {
4446 // result = left << log2(right) 4435 // result = left << log2(right)
4447 __ Lsl(result, left, right_log2); 4436 __ Lsl(result, left, right_log2);
4448 } else { 4437 } else {
4449 // result = -left << log2(-right) 4438 // result = -left << log2(-right)
4450 if (can_overflow) { 4439 if (can_overflow) {
4451 __ Negs(result, Operand(left, LSL, right_log2)); 4440 __ Negs(result, Operand(left, LSL, right_log2));
4452 DeoptimizeIf(vs, instr, Deoptimizer::kOverflow); 4441 DeoptimizeIf(vs, instr, "overflow");
4453 } else { 4442 } else {
4454 __ Neg(result, Operand(left, LSL, right_log2)); 4443 __ Neg(result, Operand(left, LSL, right_log2));
4455 } 4444 }
4456 } 4445 }
4457 return; 4446 return;
4458 } 4447 }
4459 4448
4460 4449
4461 // For the following cases, we could perform a conservative overflow check 4450 // For the following cases, we could perform a conservative overflow check
4462 // with CLS as above. However the few cycles saved are likely not worth 4451 // with CLS as above. However the few cycles saved are likely not worth
(...skipping 37 matching lines...) Expand 10 before | Expand all | Expand 10 after
4500 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero); 4489 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
4501 4490
4502 if (bailout_on_minus_zero && !left.Is(right)) { 4491 if (bailout_on_minus_zero && !left.Is(right)) {
4503 // If one operand is zero and the other is negative, the result is -0. 4492 // If one operand is zero and the other is negative, the result is -0.
4504 // - Set Z (eq) if either left or right, or both, are 0. 4493 // - Set Z (eq) if either left or right, or both, are 0.
4505 __ Cmp(left, 0); 4494 __ Cmp(left, 0);
4506 __ Ccmp(right, 0, ZFlag, ne); 4495 __ Ccmp(right, 0, ZFlag, ne);
4507 // - If so (eq), set N (mi) if left + right is negative. 4496 // - If so (eq), set N (mi) if left + right is negative.
4508 // - Otherwise, clear N. 4497 // - Otherwise, clear N.
4509 __ Ccmn(left, right, NoFlag, eq); 4498 __ Ccmn(left, right, NoFlag, eq);
4510 DeoptimizeIf(mi, instr, Deoptimizer::kMinusZero); 4499 DeoptimizeIf(mi, instr, "minus zero");
4511 } 4500 }
4512 4501
4513 if (can_overflow) { 4502 if (can_overflow) {
4514 __ Smull(result.X(), left, right); 4503 __ Smull(result.X(), left, right);
4515 __ Cmp(result.X(), Operand(result, SXTW)); 4504 __ Cmp(result.X(), Operand(result, SXTW));
4516 DeoptimizeIf(ne, instr, Deoptimizer::kOverflow); 4505 DeoptimizeIf(ne, instr, "overflow");
4517 } else { 4506 } else {
4518 __ Mul(result, left, right); 4507 __ Mul(result, left, right);
4519 } 4508 }
4520 } 4509 }
4521 4510
4522 4511
4523 void LCodeGen::DoMulS(LMulS* instr) { 4512 void LCodeGen::DoMulS(LMulS* instr) {
4524 Register result = ToRegister(instr->result()); 4513 Register result = ToRegister(instr->result());
4525 Register left = ToRegister(instr->left()); 4514 Register left = ToRegister(instr->left());
4526 Register right = ToRegister(instr->right()); 4515 Register right = ToRegister(instr->right());
4527 4516
4528 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); 4517 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
4529 bool bailout_on_minus_zero = 4518 bool bailout_on_minus_zero =
4530 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero); 4519 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
4531 4520
4532 if (bailout_on_minus_zero && !left.Is(right)) { 4521 if (bailout_on_minus_zero && !left.Is(right)) {
4533 // If one operand is zero and the other is negative, the result is -0. 4522 // If one operand is zero and the other is negative, the result is -0.
4534 // - Set Z (eq) if either left or right, or both, are 0. 4523 // - Set Z (eq) if either left or right, or both, are 0.
4535 __ Cmp(left, 0); 4524 __ Cmp(left, 0);
4536 __ Ccmp(right, 0, ZFlag, ne); 4525 __ Ccmp(right, 0, ZFlag, ne);
4537 // - If so (eq), set N (mi) if left + right is negative. 4526 // - If so (eq), set N (mi) if left + right is negative.
4538 // - Otherwise, clear N. 4527 // - Otherwise, clear N.
4539 __ Ccmn(left, right, NoFlag, eq); 4528 __ Ccmn(left, right, NoFlag, eq);
4540 DeoptimizeIf(mi, instr, Deoptimizer::kMinusZero); 4529 DeoptimizeIf(mi, instr, "minus zero");
4541 } 4530 }
4542 4531
4543 STATIC_ASSERT((kSmiShift == 32) && (kSmiTag == 0)); 4532 STATIC_ASSERT((kSmiShift == 32) && (kSmiTag == 0));
4544 if (can_overflow) { 4533 if (can_overflow) {
4545 __ Smulh(result, left, right); 4534 __ Smulh(result, left, right);
4546 __ Cmp(result, Operand(result.W(), SXTW)); 4535 __ Cmp(result, Operand(result.W(), SXTW));
4547 __ SmiTag(result); 4536 __ SmiTag(result);
4548 DeoptimizeIf(ne, instr, Deoptimizer::kOverflow); 4537 DeoptimizeIf(ne, instr, "overflow");
4549 } else { 4538 } else {
4550 if (AreAliased(result, left, right)) { 4539 if (AreAliased(result, left, right)) {
4551 // All three registers are the same: half untag the input and then 4540 // All three registers are the same: half untag the input and then
4552 // multiply, giving a tagged result. 4541 // multiply, giving a tagged result.
4553 STATIC_ASSERT((kSmiShift % 2) == 0); 4542 STATIC_ASSERT((kSmiShift % 2) == 0);
4554 __ Asr(result, left, kSmiShift / 2); 4543 __ Asr(result, left, kSmiShift / 2);
4555 __ Mul(result, result, result); 4544 __ Mul(result, result, result);
4556 } else if (result.Is(left) && !left.Is(right)) { 4545 } else if (result.Is(left) && !left.Is(right)) {
4557 // Registers result and left alias, right is distinct: untag left into 4546 // Registers result and left alias, right is distinct: untag left into
4558 // result, and then multiply by right, giving a tagged result. 4547 // result, and then multiply by right, giving a tagged result.
(...skipping 155 matching lines...) Expand 10 before | Expand all | Expand 10 after
4714 // Heap number map check. 4703 // Heap number map check.
4715 if (can_convert_undefined_to_nan) { 4704 if (can_convert_undefined_to_nan) {
4716 __ JumpIfNotHeapNumber(input, &convert_undefined); 4705 __ JumpIfNotHeapNumber(input, &convert_undefined);
4717 } else { 4706 } else {
4718 DeoptimizeIfNotHeapNumber(input, instr); 4707 DeoptimizeIfNotHeapNumber(input, instr);
4719 } 4708 }
4720 4709
4721 // Load heap number. 4710 // Load heap number.
4722 __ Ldr(result, FieldMemOperand(input, HeapNumber::kValueOffset)); 4711 __ Ldr(result, FieldMemOperand(input, HeapNumber::kValueOffset));
4723 if (instr->hydrogen()->deoptimize_on_minus_zero()) { 4712 if (instr->hydrogen()->deoptimize_on_minus_zero()) {
4724 DeoptimizeIfMinusZero(result, instr, Deoptimizer::kMinusZero); 4713 DeoptimizeIfMinusZero(result, instr, "minus zero");
4725 } 4714 }
4726 __ B(&done); 4715 __ B(&done);
4727 4716
4728 if (can_convert_undefined_to_nan) { 4717 if (can_convert_undefined_to_nan) {
4729 __ Bind(&convert_undefined); 4718 __ Bind(&convert_undefined);
4730 DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr, 4719 DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr,
4731 Deoptimizer::kNotAHeapNumberUndefined); 4720 "not a heap number/undefined");
4732 4721
4733 __ LoadRoot(scratch, Heap::kNanValueRootIndex); 4722 __ LoadRoot(scratch, Heap::kNanValueRootIndex);
4734 __ Ldr(result, FieldMemOperand(scratch, HeapNumber::kValueOffset)); 4723 __ Ldr(result, FieldMemOperand(scratch, HeapNumber::kValueOffset));
4735 __ B(&done); 4724 __ B(&done);
4736 } 4725 }
4737 4726
4738 } else { 4727 } else {
4739 DCHECK(mode == NUMBER_CANDIDATE_IS_SMI); 4728 DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
4740 // Fall through to load_smi. 4729 // Fall through to load_smi.
4741 } 4730 }
(...skipping 173 matching lines...) Expand 10 before | Expand all | Expand 10 after
4915 } 4904 }
4916 } 4905 }
4917 4906
4918 4907
4919 void LCodeGen::DoSmiTag(LSmiTag* instr) { 4908 void LCodeGen::DoSmiTag(LSmiTag* instr) {
4920 HChange* hchange = instr->hydrogen(); 4909 HChange* hchange = instr->hydrogen();
4921 Register input = ToRegister(instr->value()); 4910 Register input = ToRegister(instr->value());
4922 Register output = ToRegister(instr->result()); 4911 Register output = ToRegister(instr->result());
4923 if (hchange->CheckFlag(HValue::kCanOverflow) && 4912 if (hchange->CheckFlag(HValue::kCanOverflow) &&
4924 hchange->value()->CheckFlag(HValue::kUint32)) { 4913 hchange->value()->CheckFlag(HValue::kUint32)) {
4925 DeoptimizeIfNegative(input.W(), instr, Deoptimizer::kOverflow); 4914 DeoptimizeIfNegative(input.W(), instr, "overflow");
4926 } 4915 }
4927 __ SmiTag(output, input); 4916 __ SmiTag(output, input);
4928 } 4917 }
4929 4918
4930 4919
4931 void LCodeGen::DoSmiUntag(LSmiUntag* instr) { 4920 void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
4932 Register input = ToRegister(instr->value()); 4921 Register input = ToRegister(instr->value());
4933 Register result = ToRegister(instr->result()); 4922 Register result = ToRegister(instr->result());
4934 Label done, untag; 4923 Label done, untag;
4935 4924
4936 if (instr->needs_check()) { 4925 if (instr->needs_check()) {
4937 DeoptimizeIfNotSmi(input, instr, Deoptimizer::kNotASmi); 4926 DeoptimizeIfNotSmi(input, instr, "not a Smi");
4938 } 4927 }
4939 4928
4940 __ Bind(&untag); 4929 __ Bind(&untag);
4941 __ SmiUntag(result, input); 4930 __ SmiUntag(result, input);
4942 __ Bind(&done); 4931 __ Bind(&done);
4943 } 4932 }
4944 4933
4945 4934
4946 void LCodeGen::DoShiftI(LShiftI* instr) { 4935 void LCodeGen::DoShiftI(LShiftI* instr) {
4947 LOperand* right_op = instr->right(); 4936 LOperand* right_op = instr->right();
4948 Register left = ToRegister32(instr->left()); 4937 Register left = ToRegister32(instr->left());
4949 Register result = ToRegister32(instr->result()); 4938 Register result = ToRegister32(instr->result());
4950 4939
4951 if (right_op->IsRegister()) { 4940 if (right_op->IsRegister()) {
4952 Register right = ToRegister32(instr->right()); 4941 Register right = ToRegister32(instr->right());
4953 switch (instr->op()) { 4942 switch (instr->op()) {
4954 case Token::ROR: __ Ror(result, left, right); break; 4943 case Token::ROR: __ Ror(result, left, right); break;
4955 case Token::SAR: __ Asr(result, left, right); break; 4944 case Token::SAR: __ Asr(result, left, right); break;
4956 case Token::SHL: __ Lsl(result, left, right); break; 4945 case Token::SHL: __ Lsl(result, left, right); break;
4957 case Token::SHR: 4946 case Token::SHR:
4958 __ Lsr(result, left, right); 4947 __ Lsr(result, left, right);
4959 if (instr->can_deopt()) { 4948 if (instr->can_deopt()) {
4960 // If `left >>> right` >= 0x80000000, the result is not representable 4949 // If `left >>> right` >= 0x80000000, the result is not representable
4961 // in a signed 32-bit smi. 4950 // in a signed 32-bit smi.
4962 DeoptimizeIfNegative(result, instr, Deoptimizer::kNegativeValue); 4951 DeoptimizeIfNegative(result, instr, "negative value");
4963 } 4952 }
4964 break; 4953 break;
4965 default: UNREACHABLE(); 4954 default: UNREACHABLE();
4966 } 4955 }
4967 } else { 4956 } else {
4968 DCHECK(right_op->IsConstantOperand()); 4957 DCHECK(right_op->IsConstantOperand());
4969 int shift_count = JSShiftAmountFromLConstant(right_op); 4958 int shift_count = JSShiftAmountFromLConstant(right_op);
4970 if (shift_count == 0) { 4959 if (shift_count == 0) {
4971 if ((instr->op() == Token::SHR) && instr->can_deopt()) { 4960 if ((instr->op() == Token::SHR) && instr->can_deopt()) {
4972 DeoptimizeIfNegative(left, instr, Deoptimizer::kNegativeValue); 4961 DeoptimizeIfNegative(left, instr, "negative value");
4973 } 4962 }
4974 __ Mov(result, left, kDiscardForSameWReg); 4963 __ Mov(result, left, kDiscardForSameWReg);
4975 } else { 4964 } else {
4976 switch (instr->op()) { 4965 switch (instr->op()) {
4977 case Token::ROR: __ Ror(result, left, shift_count); break; 4966 case Token::ROR: __ Ror(result, left, shift_count); break;
4978 case Token::SAR: __ Asr(result, left, shift_count); break; 4967 case Token::SAR: __ Asr(result, left, shift_count); break;
4979 case Token::SHL: __ Lsl(result, left, shift_count); break; 4968 case Token::SHL: __ Lsl(result, left, shift_count); break;
4980 case Token::SHR: __ Lsr(result, left, shift_count); break; 4969 case Token::SHR: __ Lsr(result, left, shift_count); break;
4981 default: UNREACHABLE(); 4970 default: UNREACHABLE();
4982 } 4971 }
(...skipping 32 matching lines...) Expand 10 before | Expand all | Expand 10 after
5015 break; 5004 break;
5016 case Token::SHL: 5005 case Token::SHL:
5017 __ Lsl(result, left, result); 5006 __ Lsl(result, left, result);
5018 break; 5007 break;
5019 case Token::SHR: 5008 case Token::SHR:
5020 __ Lsr(result, left, result); 5009 __ Lsr(result, left, result);
5021 __ Bic(result, result, kSmiShiftMask); 5010 __ Bic(result, result, kSmiShiftMask);
5022 if (instr->can_deopt()) { 5011 if (instr->can_deopt()) {
5023 // If `left >>> right` >= 0x80000000, the result is not representable 5012 // If `left >>> right` >= 0x80000000, the result is not representable
5024 // in a signed 32-bit smi. 5013 // in a signed 32-bit smi.
5025 DeoptimizeIfNegative(result, instr, Deoptimizer::kNegativeValue); 5014 DeoptimizeIfNegative(result, instr, "negative value");
5026 } 5015 }
5027 break; 5016 break;
5028 default: UNREACHABLE(); 5017 default: UNREACHABLE();
5029 } 5018 }
5030 } else { 5019 } else {
5031 DCHECK(right_op->IsConstantOperand()); 5020 DCHECK(right_op->IsConstantOperand());
5032 int shift_count = JSShiftAmountFromLConstant(right_op); 5021 int shift_count = JSShiftAmountFromLConstant(right_op);
5033 if (shift_count == 0) { 5022 if (shift_count == 0) {
5034 if ((instr->op() == Token::SHR) && instr->can_deopt()) { 5023 if ((instr->op() == Token::SHR) && instr->can_deopt()) {
5035 DeoptimizeIfNegative(left, instr, Deoptimizer::kNegativeValue); 5024 DeoptimizeIfNegative(left, instr, "negative value");
5036 } 5025 }
5037 __ Mov(result, left); 5026 __ Mov(result, left);
5038 } else { 5027 } else {
5039 switch (instr->op()) { 5028 switch (instr->op()) {
5040 case Token::ROR: 5029 case Token::ROR:
5041 __ SmiUntag(result, left); 5030 __ SmiUntag(result, left);
5042 __ Ror(result.W(), result.W(), shift_count); 5031 __ Ror(result.W(), result.W(), shift_count);
5043 __ SmiTag(result); 5032 __ SmiTag(result);
5044 break; 5033 break;
5045 case Token::SAR: 5034 case Token::SAR:
(...skipping 107 matching lines...) Expand 10 before | Expand all | Expand 10 after
5153 Register context = ToRegister(instr->context()); 5142 Register context = ToRegister(instr->context());
5154 Register value = ToRegister(instr->value()); 5143 Register value = ToRegister(instr->value());
5155 Register scratch = ToRegister(instr->temp()); 5144 Register scratch = ToRegister(instr->temp());
5156 MemOperand target = ContextMemOperand(context, instr->slot_index()); 5145 MemOperand target = ContextMemOperand(context, instr->slot_index());
5157 5146
5158 Label skip_assignment; 5147 Label skip_assignment;
5159 5148
5160 if (instr->hydrogen()->RequiresHoleCheck()) { 5149 if (instr->hydrogen()->RequiresHoleCheck()) {
5161 __ Ldr(scratch, target); 5150 __ Ldr(scratch, target);
5162 if (instr->hydrogen()->DeoptimizesOnHole()) { 5151 if (instr->hydrogen()->DeoptimizesOnHole()) {
5163 DeoptimizeIfRoot(scratch, Heap::kTheHoleValueRootIndex, instr, 5152 DeoptimizeIfRoot(scratch, Heap::kTheHoleValueRootIndex, instr, "hole");
5164 Deoptimizer::kHole);
5165 } else { 5153 } else {
5166 __ JumpIfNotRoot(scratch, Heap::kTheHoleValueRootIndex, &skip_assignment); 5154 __ JumpIfNotRoot(scratch, Heap::kTheHoleValueRootIndex, &skip_assignment);
5167 } 5155 }
5168 } 5156 }
5169 5157
5170 __ Str(value, target); 5158 __ Str(value, target);
5171 if (instr->hydrogen()->NeedsWriteBarrier()) { 5159 if (instr->hydrogen()->NeedsWriteBarrier()) {
5172 SmiCheck check_needed = 5160 SmiCheck check_needed =
5173 instr->hydrogen()->value()->type().IsHeapObject() 5161 instr->hydrogen()->value()->type().IsHeapObject()
5174 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; 5162 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
(...skipping 17 matching lines...) Expand all
5192 // Load the cell. 5180 // Load the cell.
5193 __ Mov(cell, Operand(instr->hydrogen()->cell().handle())); 5181 __ Mov(cell, Operand(instr->hydrogen()->cell().handle()));
5194 5182
5195 // If the cell we are storing to contains the hole it could have 5183 // If the cell we are storing to contains the hole it could have
5196 // been deleted from the property dictionary. In that case, we need 5184 // been deleted from the property dictionary. In that case, we need
5197 // to update the property details in the property dictionary to mark 5185 // to update the property details in the property dictionary to mark
5198 // it as no longer deleted. We deoptimize in that case. 5186 // it as no longer deleted. We deoptimize in that case.
5199 if (instr->hydrogen()->RequiresHoleCheck()) { 5187 if (instr->hydrogen()->RequiresHoleCheck()) {
5200 Register payload = ToRegister(instr->temp2()); 5188 Register payload = ToRegister(instr->temp2());
5201 __ Ldr(payload, FieldMemOperand(cell, Cell::kValueOffset)); 5189 __ Ldr(payload, FieldMemOperand(cell, Cell::kValueOffset));
5202 DeoptimizeIfRoot(payload, Heap::kTheHoleValueRootIndex, instr, 5190 DeoptimizeIfRoot(payload, Heap::kTheHoleValueRootIndex, instr, "hole");
5203 Deoptimizer::kHole);
5204 } 5191 }
5205 5192
5206 // Store the value. 5193 // Store the value.
5207 __ Str(value, FieldMemOperand(cell, Cell::kValueOffset)); 5194 __ Str(value, FieldMemOperand(cell, Cell::kValueOffset));
5208 // Cells are always rescanned, so no write barrier here. 5195 // Cells are always rescanned, so no write barrier here.
5209 } 5196 }
5210 5197
5211 5198
5212 void LCodeGen::DoStoreKeyedExternal(LStoreKeyedExternal* instr) { 5199 void LCodeGen::DoStoreKeyedExternal(LStoreKeyedExternal* instr) {
5213 Register ext_ptr = ToRegister(instr->elements()); 5200 Register ext_ptr = ToRegister(instr->elements());
(...skipping 401 matching lines...) Expand 10 before | Expand all | Expand 10 after
5615 5602
5616 5603
5617 void LCodeGen::DoSubI(LSubI* instr) { 5604 void LCodeGen::DoSubI(LSubI* instr) {
5618 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); 5605 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
5619 Register result = ToRegister32(instr->result()); 5606 Register result = ToRegister32(instr->result());
5620 Register left = ToRegister32(instr->left()); 5607 Register left = ToRegister32(instr->left());
5621 Operand right = ToShiftedRightOperand32(instr->right(), instr); 5608 Operand right = ToShiftedRightOperand32(instr->right(), instr);
5622 5609
5623 if (can_overflow) { 5610 if (can_overflow) {
5624 __ Subs(result, left, right); 5611 __ Subs(result, left, right);
5625 DeoptimizeIf(vs, instr, Deoptimizer::kOverflow); 5612 DeoptimizeIf(vs, instr, "overflow");
5626 } else { 5613 } else {
5627 __ Sub(result, left, right); 5614 __ Sub(result, left, right);
5628 } 5615 }
5629 } 5616 }
5630 5617
5631 5618
5632 void LCodeGen::DoSubS(LSubS* instr) { 5619 void LCodeGen::DoSubS(LSubS* instr) {
5633 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); 5620 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
5634 Register result = ToRegister(instr->result()); 5621 Register result = ToRegister(instr->result());
5635 Register left = ToRegister(instr->left()); 5622 Register left = ToRegister(instr->left());
5636 Operand right = ToOperand(instr->right()); 5623 Operand right = ToOperand(instr->right());
5637 if (can_overflow) { 5624 if (can_overflow) {
5638 __ Subs(result, left, right); 5625 __ Subs(result, left, right);
5639 DeoptimizeIf(vs, instr, Deoptimizer::kOverflow); 5626 DeoptimizeIf(vs, instr, "overflow");
5640 } else { 5627 } else {
5641 __ Sub(result, left, right); 5628 __ Sub(result, left, right);
5642 } 5629 }
5643 } 5630 }
5644 5631
5645 5632
5646 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, 5633 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr,
5647 LOperand* value, 5634 LOperand* value,
5648 LOperand* temp1, 5635 LOperand* temp1,
5649 LOperand* temp2) { 5636 LOperand* temp2) {
(...skipping 20 matching lines...) Expand all
5670 Register false_root = scratch1; 5657 Register false_root = scratch1;
5671 __ LoadTrueFalseRoots(true_root, false_root); 5658 __ LoadTrueFalseRoots(true_root, false_root);
5672 __ Cmp(input, true_root); 5659 __ Cmp(input, true_root);
5673 __ Cset(output, eq); 5660 __ Cset(output, eq);
5674 __ Ccmp(input, false_root, ZFlag, ne); 5661 __ Ccmp(input, false_root, ZFlag, ne);
5675 __ B(eq, &done); 5662 __ B(eq, &done);
5676 5663
5677 // Output contains zero, undefined is converted to zero for truncating 5664 // Output contains zero, undefined is converted to zero for truncating
5678 // conversions. 5665 // conversions.
5679 DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr, 5666 DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr,
5680 Deoptimizer::kNotAHeapNumberUndefinedBoolean); 5667 "not a heap number/undefined/true/false");
5681 } else { 5668 } else {
5682 Register output = ToRegister32(instr->result()); 5669 Register output = ToRegister32(instr->result());
5683 DoubleRegister dbl_scratch2 = ToDoubleRegister(temp2); 5670 DoubleRegister dbl_scratch2 = ToDoubleRegister(temp2);
5684 5671
5685 DeoptimizeIfNotHeapNumber(input, instr); 5672 DeoptimizeIfNotHeapNumber(input, instr);
5686 5673
5687 // A heap number: load value and convert to int32 using non-truncating 5674 // A heap number: load value and convert to int32 using non-truncating
5688 // function. If the result is out of range, branch to deoptimize. 5675 // function. If the result is out of range, branch to deoptimize.
5689 __ Ldr(dbl_scratch1, FieldMemOperand(input, HeapNumber::kValueOffset)); 5676 __ Ldr(dbl_scratch1, FieldMemOperand(input, HeapNumber::kValueOffset));
5690 __ TryRepresentDoubleAsInt32(output, dbl_scratch1, dbl_scratch2); 5677 __ TryRepresentDoubleAsInt32(output, dbl_scratch1, dbl_scratch2);
5691 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN); 5678 DeoptimizeIf(ne, instr, "lost precision or NaN");
5692 5679
5693 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 5680 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
5694 __ Cmp(output, 0); 5681 __ Cmp(output, 0);
5695 __ B(ne, &done); 5682 __ B(ne, &done);
5696 __ Fmov(scratch1, dbl_scratch1); 5683 __ Fmov(scratch1, dbl_scratch1);
5697 DeoptimizeIfNegative(scratch1, instr, Deoptimizer::kMinusZero); 5684 DeoptimizeIfNegative(scratch1, instr, "minus zero");
5698 } 5685 }
5699 } 5686 }
5700 __ Bind(&done); 5687 __ Bind(&done);
5701 } 5688 }
5702 5689
5703 5690
5704 void LCodeGen::DoTaggedToI(LTaggedToI* instr) { 5691 void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
5705 class DeferredTaggedToI: public LDeferredCode { 5692 class DeferredTaggedToI: public LDeferredCode {
5706 public: 5693 public:
5707 DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr) 5694 DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
(...skipping 120 matching lines...) Expand 10 before | Expand all | Expand 10 after
5828 } 5815 }
5829 5816
5830 5817
5831 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) { 5818 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
5832 Register object = ToRegister(instr->object()); 5819 Register object = ToRegister(instr->object());
5833 Register temp1 = ToRegister(instr->temp1()); 5820 Register temp1 = ToRegister(instr->temp1());
5834 Register temp2 = ToRegister(instr->temp2()); 5821 Register temp2 = ToRegister(instr->temp2());
5835 5822
5836 Label no_memento_found; 5823 Label no_memento_found;
5837 __ TestJSArrayForAllocationMemento(object, temp1, temp2, &no_memento_found); 5824 __ TestJSArrayForAllocationMemento(object, temp1, temp2, &no_memento_found);
5838 DeoptimizeIf(eq, instr, Deoptimizer::kMementoFound); 5825 DeoptimizeIf(eq, instr, "memento found");
5839 __ Bind(&no_memento_found); 5826 __ Bind(&no_memento_found);
5840 } 5827 }
5841 5828
5842 5829
5843 void LCodeGen::DoTruncateDoubleToIntOrSmi(LTruncateDoubleToIntOrSmi* instr) { 5830 void LCodeGen::DoTruncateDoubleToIntOrSmi(LTruncateDoubleToIntOrSmi* instr) {
5844 DoubleRegister input = ToDoubleRegister(instr->value()); 5831 DoubleRegister input = ToDoubleRegister(instr->value());
5845 Register result = ToRegister(instr->result()); 5832 Register result = ToRegister(instr->result());
5846 __ TruncateDoubleToI(result, input); 5833 __ TruncateDoubleToI(result, input);
5847 if (instr->tag_result()) { 5834 if (instr->tag_result()) {
5848 __ SmiTag(result, result); 5835 __ SmiTag(result, result);
(...skipping 104 matching lines...) Expand 10 before | Expand all | Expand 10 after
5953 __ Ucvtf(ToDoubleRegister(instr->result()), ToRegister32(instr->value())); 5940 __ Ucvtf(ToDoubleRegister(instr->result()), ToRegister32(instr->value()));
5954 } 5941 }
5955 5942
5956 5943
5957 void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) { 5944 void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
5958 Register object = ToRegister(instr->value()); 5945 Register object = ToRegister(instr->value());
5959 Register map = ToRegister(instr->map()); 5946 Register map = ToRegister(instr->map());
5960 Register temp = ToRegister(instr->temp()); 5947 Register temp = ToRegister(instr->temp());
5961 __ Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset)); 5948 __ Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
5962 __ Cmp(map, temp); 5949 __ Cmp(map, temp);
5963 DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap); 5950 DeoptimizeIf(ne, instr, "wrong map");
5964 } 5951 }
5965 5952
5966 5953
5967 void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) { 5954 void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
5968 Register receiver = ToRegister(instr->receiver()); 5955 Register receiver = ToRegister(instr->receiver());
5969 Register function = ToRegister(instr->function()); 5956 Register function = ToRegister(instr->function());
5970 Register result = ToRegister(instr->result()); 5957 Register result = ToRegister(instr->result());
5971 5958
5972 // If the receiver is null or undefined, we have to pass the global object as 5959 // If the receiver is null or undefined, we have to pass the global object as
5973 // a receiver to normal functions. Values have to be passed unchanged to 5960 // a receiver to normal functions. Values have to be passed unchanged to
(...skipping 13 matching lines...) Expand all
5987 5974
5988 // Do not transform the receiver to object for builtins. 5975 // Do not transform the receiver to object for builtins.
5989 __ Tbnz(result, SharedFunctionInfo::kNative, &copy_receiver); 5976 __ Tbnz(result, SharedFunctionInfo::kNative, &copy_receiver);
5990 } 5977 }
5991 5978
5992 // Normal function. Replace undefined or null with global receiver. 5979 // Normal function. Replace undefined or null with global receiver.
5993 __ JumpIfRoot(receiver, Heap::kNullValueRootIndex, &global_object); 5980 __ JumpIfRoot(receiver, Heap::kNullValueRootIndex, &global_object);
5994 __ JumpIfRoot(receiver, Heap::kUndefinedValueRootIndex, &global_object); 5981 __ JumpIfRoot(receiver, Heap::kUndefinedValueRootIndex, &global_object);
5995 5982
5996 // Deoptimize if the receiver is not a JS object. 5983 // Deoptimize if the receiver is not a JS object.
5997 DeoptimizeIfSmi(receiver, instr, Deoptimizer::kSmi); 5984 DeoptimizeIfSmi(receiver, instr, "Smi");
5998 __ CompareObjectType(receiver, result, result, FIRST_SPEC_OBJECT_TYPE); 5985 __ CompareObjectType(receiver, result, result, FIRST_SPEC_OBJECT_TYPE);
5999 __ B(ge, &copy_receiver); 5986 __ B(ge, &copy_receiver);
6000 Deoptimize(instr, Deoptimizer::kNotAJavaScriptObject); 5987 Deoptimize(instr, "not a JavaScript object");
6001 5988
6002 __ Bind(&global_object); 5989 __ Bind(&global_object);
6003 __ Ldr(result, FieldMemOperand(function, JSFunction::kContextOffset)); 5990 __ Ldr(result, FieldMemOperand(function, JSFunction::kContextOffset));
6004 __ Ldr(result, ContextMemOperand(result, Context::GLOBAL_OBJECT_INDEX)); 5991 __ Ldr(result, ContextMemOperand(result, Context::GLOBAL_OBJECT_INDEX));
6005 __ Ldr(result, FieldMemOperand(result, GlobalObject::kGlobalProxyOffset)); 5992 __ Ldr(result, FieldMemOperand(result, GlobalObject::kGlobalProxyOffset));
6006 __ B(&done); 5993 __ B(&done);
6007 5994
6008 __ Bind(&copy_receiver); 5995 __ Bind(&copy_receiver);
6009 __ Mov(result, receiver); 5996 __ Mov(result, receiver);
6010 __ Bind(&done); 5997 __ Bind(&done);
(...skipping 86 matching lines...) Expand 10 before | Expand all | Expand 10 after
6097 Handle<ScopeInfo> scope_info = instr->scope_info(); 6084 Handle<ScopeInfo> scope_info = instr->scope_info();
6098 __ Push(scope_info); 6085 __ Push(scope_info);
6099 __ Push(ToRegister(instr->function())); 6086 __ Push(ToRegister(instr->function()));
6100 CallRuntime(Runtime::kPushBlockContext, 2, instr); 6087 CallRuntime(Runtime::kPushBlockContext, 2, instr);
6101 RecordSafepoint(Safepoint::kNoLazyDeopt); 6088 RecordSafepoint(Safepoint::kNoLazyDeopt);
6102 } 6089 }
6103 6090
6104 6091
6105 6092
6106 } } // namespace v8::internal 6093 } } // namespace v8::internal
OLDNEW
« no previous file with comments | « src/arm64/lithium-codegen-arm64.h ('k') | src/assembler.h » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698