Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(1)

Side by Side Diff: src/arm64/lithium-codegen-arm64.cc

Issue 874323003: Externalize deoptimization reasons. (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: arm64 Created 5 years, 10 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 // Copyright 2013 the V8 project authors. All rights reserved. 1 // Copyright 2013 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "src/v8.h" 5 #include "src/v8.h"
6 6
7 #include "src/arm64/lithium-codegen-arm64.h" 7 #include "src/arm64/lithium-codegen-arm64.h"
8 #include "src/arm64/lithium-gap-resolver-arm64.h" 8 #include "src/arm64/lithium-gap-resolver-arm64.h"
9 #include "src/base/bits.h" 9 #include "src/base/bits.h"
10 #include "src/code-factory.h" 10 #include "src/code-factory.h"
(...skipping 988 matching lines...) Expand 10 before | Expand all | Expand 10 after
999 999
1000 for (int i = 0, length = inlined_closures->length(); i < length; i++) { 1000 for (int i = 0, length = inlined_closures->length(); i < length; i++) {
1001 DefineDeoptimizationLiteral(inlined_closures->at(i)); 1001 DefineDeoptimizationLiteral(inlined_closures->at(i));
1002 } 1002 }
1003 1003
1004 inlined_function_count_ = deoptimization_literals_.length(); 1004 inlined_function_count_ = deoptimization_literals_.length();
1005 } 1005 }
1006 1006
1007 1007
1008 void LCodeGen::DeoptimizeBranch( 1008 void LCodeGen::DeoptimizeBranch(
1009 LInstruction* instr, const char* detail, BranchType branch_type, 1009 LInstruction* instr, Deoptimizer::DeoptReason deopt_reason,
1010 Register reg, int bit, Deoptimizer::BailoutType* override_bailout_type) { 1010 BranchType branch_type, Register reg, int bit,
1011 Deoptimizer::BailoutType* override_bailout_type) {
1011 LEnvironment* environment = instr->environment(); 1012 LEnvironment* environment = instr->environment();
1012 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); 1013 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
1013 Deoptimizer::BailoutType bailout_type = 1014 Deoptimizer::BailoutType bailout_type =
1014 info()->IsStub() ? Deoptimizer::LAZY : Deoptimizer::EAGER; 1015 info()->IsStub() ? Deoptimizer::LAZY : Deoptimizer::EAGER;
1015 1016
1016 if (override_bailout_type != NULL) { 1017 if (override_bailout_type != NULL) {
1017 bailout_type = *override_bailout_type; 1018 bailout_type = *override_bailout_type;
1018 } 1019 }
1019 1020
1020 DCHECK(environment->HasBeenRegistered()); 1021 DCHECK(environment->HasBeenRegistered());
(...skipping 30 matching lines...) Expand all
1051 } 1052 }
1052 1053
1053 if (info()->ShouldTrapOnDeopt()) { 1054 if (info()->ShouldTrapOnDeopt()) {
1054 Label dont_trap; 1055 Label dont_trap;
1055 __ B(&dont_trap, InvertBranchType(branch_type), reg, bit); 1056 __ B(&dont_trap, InvertBranchType(branch_type), reg, bit);
1056 __ Debug("trap_on_deopt", __LINE__, BREAK); 1057 __ Debug("trap_on_deopt", __LINE__, BREAK);
1057 __ Bind(&dont_trap); 1058 __ Bind(&dont_trap);
1058 } 1059 }
1059 1060
1060 Deoptimizer::Reason reason(instr->hydrogen_value()->position().raw(), 1061 Deoptimizer::Reason reason(instr->hydrogen_value()->position().raw(),
1061 instr->Mnemonic(), detail); 1062 instr->Mnemonic(), deopt_reason);
1062 DCHECK(info()->IsStub() || frame_is_built_); 1063 DCHECK(info()->IsStub() || frame_is_built_);
1063 // Go through jump table if we need to build frame, or restore caller doubles. 1064 // Go through jump table if we need to build frame, or restore caller doubles.
1064 if (branch_type == always && 1065 if (branch_type == always &&
1065 frame_is_built_ && !info()->saves_caller_doubles()) { 1066 frame_is_built_ && !info()->saves_caller_doubles()) {
1066 DeoptComment(reason); 1067 DeoptComment(reason);
1067 __ Call(entry, RelocInfo::RUNTIME_ENTRY); 1068 __ Call(entry, RelocInfo::RUNTIME_ENTRY);
1068 } else { 1069 } else {
1069 Deoptimizer::JumpTableEntry* table_entry = 1070 Deoptimizer::JumpTableEntry* table_entry =
1070 new (zone()) Deoptimizer::JumpTableEntry(entry, reason, bailout_type, 1071 new (zone()) Deoptimizer::JumpTableEntry(entry, reason, bailout_type,
1071 !frame_is_built_); 1072 !frame_is_built_);
1072 // We often have several deopts to the same entry, reuse the last 1073 // We often have several deopts to the same entry, reuse the last
1073 // jump entry if this is the case. 1074 // jump entry if this is the case.
1074 if (jump_table_.is_empty() || 1075 if (jump_table_.is_empty() ||
1075 !table_entry->IsEquivalentTo(*jump_table_.last())) { 1076 !table_entry->IsEquivalentTo(*jump_table_.last())) {
1076 jump_table_.Add(table_entry, zone()); 1077 jump_table_.Add(table_entry, zone());
1077 } 1078 }
1078 __ B(&jump_table_.last()->label, branch_type, reg, bit); 1079 __ B(&jump_table_.last()->label, branch_type, reg, bit);
1079 } 1080 }
1080 } 1081 }
1081 1082
1082 1083
1083 void LCodeGen::Deoptimize(LInstruction* instr, const char* detail, 1084 void LCodeGen::Deoptimize(LInstruction* instr,
1085 Deoptimizer::DeoptReason deopt_reason,
1084 Deoptimizer::BailoutType* override_bailout_type) { 1086 Deoptimizer::BailoutType* override_bailout_type) {
1085 DeoptimizeBranch(instr, detail, always, NoReg, -1, override_bailout_type); 1087 DeoptimizeBranch(instr, deopt_reason, always, NoReg, -1,
1088 override_bailout_type);
1086 } 1089 }
1087 1090
1088 1091
1089 void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr, 1092 void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr,
1090 const char* detail) { 1093 Deoptimizer::DeoptReason deopt_reason) {
1091 DeoptimizeBranch(instr, detail, static_cast<BranchType>(cond)); 1094 DeoptimizeBranch(instr, deopt_reason, static_cast<BranchType>(cond));
1092 } 1095 }
1093 1096
1094 1097
1095 void LCodeGen::DeoptimizeIfZero(Register rt, LInstruction* instr, 1098 void LCodeGen::DeoptimizeIfZero(Register rt, LInstruction* instr,
1096 const char* detail) { 1099 Deoptimizer::DeoptReason deopt_reason) {
1097 DeoptimizeBranch(instr, detail, reg_zero, rt); 1100 DeoptimizeBranch(instr, deopt_reason, reg_zero, rt);
1098 } 1101 }
1099 1102
1100 1103
1101 void LCodeGen::DeoptimizeIfNotZero(Register rt, LInstruction* instr, 1104 void LCodeGen::DeoptimizeIfNotZero(Register rt, LInstruction* instr,
1102 const char* detail) { 1105 Deoptimizer::DeoptReason deopt_reason) {
1103 DeoptimizeBranch(instr, detail, reg_not_zero, rt); 1106 DeoptimizeBranch(instr, deopt_reason, reg_not_zero, rt);
1104 } 1107 }
1105 1108
1106 1109
1107 void LCodeGen::DeoptimizeIfNegative(Register rt, LInstruction* instr, 1110 void LCodeGen::DeoptimizeIfNegative(Register rt, LInstruction* instr,
1108 const char* detail) { 1111 Deoptimizer::DeoptReason deopt_reason) {
1109 int sign_bit = rt.Is64Bits() ? kXSignBit : kWSignBit; 1112 int sign_bit = rt.Is64Bits() ? kXSignBit : kWSignBit;
1110 DeoptimizeIfBitSet(rt, sign_bit, instr, detail); 1113 DeoptimizeIfBitSet(rt, sign_bit, instr, deopt_reason);
1111 } 1114 }
1112 1115
1113 1116
1114 void LCodeGen::DeoptimizeIfSmi(Register rt, LInstruction* instr, 1117 void LCodeGen::DeoptimizeIfSmi(Register rt, LInstruction* instr,
1115 const char* detail) { 1118 Deoptimizer::DeoptReason deopt_reason) {
1116 DeoptimizeIfBitClear(rt, MaskToBit(kSmiTagMask), instr, detail); 1119 DeoptimizeIfBitClear(rt, MaskToBit(kSmiTagMask), instr, deopt_reason);
1117 } 1120 }
1118 1121
1119 1122
1120 void LCodeGen::DeoptimizeIfNotSmi(Register rt, LInstruction* instr, 1123 void LCodeGen::DeoptimizeIfNotSmi(Register rt, LInstruction* instr,
1121 const char* detail) { 1124 Deoptimizer::DeoptReason deopt_reason) {
1122 DeoptimizeIfBitSet(rt, MaskToBit(kSmiTagMask), instr, detail); 1125 DeoptimizeIfBitSet(rt, MaskToBit(kSmiTagMask), instr, deopt_reason);
1123 } 1126 }
1124 1127
1125 1128
1126 void LCodeGen::DeoptimizeIfRoot(Register rt, Heap::RootListIndex index, 1129 void LCodeGen::DeoptimizeIfRoot(Register rt, Heap::RootListIndex index,
1127 LInstruction* instr, const char* detail) { 1130 LInstruction* instr,
1131 Deoptimizer::DeoptReason deopt_reason) {
1128 __ CompareRoot(rt, index); 1132 __ CompareRoot(rt, index);
1129 DeoptimizeIf(eq, instr, detail); 1133 DeoptimizeIf(eq, instr, deopt_reason);
1130 } 1134 }
1131 1135
1132 1136
1133 void LCodeGen::DeoptimizeIfNotRoot(Register rt, Heap::RootListIndex index, 1137 void LCodeGen::DeoptimizeIfNotRoot(Register rt, Heap::RootListIndex index,
1134 LInstruction* instr, const char* detail) { 1138 LInstruction* instr,
1139 Deoptimizer::DeoptReason deopt_reason) {
1135 __ CompareRoot(rt, index); 1140 __ CompareRoot(rt, index);
1136 DeoptimizeIf(ne, instr, detail); 1141 DeoptimizeIf(ne, instr, deopt_reason);
1137 } 1142 }
1138 1143
1139 1144
1140 void LCodeGen::DeoptimizeIfMinusZero(DoubleRegister input, LInstruction* instr, 1145 void LCodeGen::DeoptimizeIfMinusZero(DoubleRegister input, LInstruction* instr,
1141 const char* detail) { 1146 Deoptimizer::DeoptReason deopt_reason) {
1142 __ TestForMinusZero(input); 1147 __ TestForMinusZero(input);
1143 DeoptimizeIf(vs, instr, detail); 1148 DeoptimizeIf(vs, instr, deopt_reason);
1144 } 1149 }
1145 1150
1146 1151
1147 void LCodeGen::DeoptimizeIfNotHeapNumber(Register object, LInstruction* instr) { 1152 void LCodeGen::DeoptimizeIfNotHeapNumber(Register object, LInstruction* instr) {
1148 __ CompareObjectMap(object, Heap::kHeapNumberMapRootIndex); 1153 __ CompareObjectMap(object, Heap::kHeapNumberMapRootIndex);
1149 DeoptimizeIf(ne, instr, "not heap number"); 1154 DeoptimizeIf(ne, instr, Deoptimizer::kNotHeapNumber);
1150 } 1155 }
1151 1156
1152 1157
1153 void LCodeGen::DeoptimizeIfBitSet(Register rt, int bit, LInstruction* instr, 1158 void LCodeGen::DeoptimizeIfBitSet(Register rt, int bit, LInstruction* instr,
1154 const char* detail) { 1159 Deoptimizer::DeoptReason deopt_reason) {
1155 DeoptimizeBranch(instr, detail, reg_bit_set, rt, bit); 1160 DeoptimizeBranch(instr, deopt_reason, reg_bit_set, rt, bit);
1156 } 1161 }
1157 1162
1158 1163
1159 void LCodeGen::DeoptimizeIfBitClear(Register rt, int bit, LInstruction* instr, 1164 void LCodeGen::DeoptimizeIfBitClear(Register rt, int bit, LInstruction* instr,
1160 const char* detail) { 1165 Deoptimizer::DeoptReason deopt_reason) {
1161 DeoptimizeBranch(instr, detail, reg_bit_clear, rt, bit); 1166 DeoptimizeBranch(instr, deopt_reason, reg_bit_clear, rt, bit);
1162 } 1167 }
1163 1168
1164 1169
1165 void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) { 1170 void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
1166 if (!info()->IsStub()) { 1171 if (!info()->IsStub()) {
1167 // Ensure that we have enough space after the previous lazy-bailout 1172 // Ensure that we have enough space after the previous lazy-bailout
1168 // instruction for patching the code here. 1173 // instruction for patching the code here.
1169 intptr_t current_pc = masm()->pc_offset(); 1174 intptr_t current_pc = masm()->pc_offset();
1170 1175
1171 if (current_pc < (last_lazy_deopt_pc_ + space_needed)) { 1176 if (current_pc < (last_lazy_deopt_pc_ + space_needed)) {
(...skipping 353 matching lines...) Expand 10 before | Expand all | Expand 10 after
1525 1530
1526 1531
1527 void LCodeGen::DoAddI(LAddI* instr) { 1532 void LCodeGen::DoAddI(LAddI* instr) {
1528 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); 1533 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1529 Register result = ToRegister32(instr->result()); 1534 Register result = ToRegister32(instr->result());
1530 Register left = ToRegister32(instr->left()); 1535 Register left = ToRegister32(instr->left());
1531 Operand right = ToShiftedRightOperand32(instr->right(), instr); 1536 Operand right = ToShiftedRightOperand32(instr->right(), instr);
1532 1537
1533 if (can_overflow) { 1538 if (can_overflow) {
1534 __ Adds(result, left, right); 1539 __ Adds(result, left, right);
1535 DeoptimizeIf(vs, instr, "overflow"); 1540 DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
1536 } else { 1541 } else {
1537 __ Add(result, left, right); 1542 __ Add(result, left, right);
1538 } 1543 }
1539 } 1544 }
1540 1545
1541 1546
1542 void LCodeGen::DoAddS(LAddS* instr) { 1547 void LCodeGen::DoAddS(LAddS* instr) {
1543 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); 1548 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1544 Register result = ToRegister(instr->result()); 1549 Register result = ToRegister(instr->result());
1545 Register left = ToRegister(instr->left()); 1550 Register left = ToRegister(instr->left());
1546 Operand right = ToOperand(instr->right()); 1551 Operand right = ToOperand(instr->right());
1547 if (can_overflow) { 1552 if (can_overflow) {
1548 __ Adds(result, left, right); 1553 __ Adds(result, left, right);
1549 DeoptimizeIf(vs, instr, "overflow"); 1554 DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
1550 } else { 1555 } else {
1551 __ Add(result, left, right); 1556 __ Add(result, left, right);
1552 } 1557 }
1553 } 1558 }
1554 1559
1555 1560
1556 void LCodeGen::DoAllocate(LAllocate* instr) { 1561 void LCodeGen::DoAllocate(LAllocate* instr) {
1557 class DeferredAllocate: public LDeferredCode { 1562 class DeferredAllocate: public LDeferredCode {
1558 public: 1563 public:
1559 DeferredAllocate(LCodeGen* codegen, LAllocate* instr) 1564 DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
(...skipping 105 matching lines...) Expand 10 before | Expand all | Expand 10 after
1665 Register scratch = x5; 1670 Register scratch = x5;
1666 DCHECK(receiver.Is(x0)); // Used for parameter count. 1671 DCHECK(receiver.Is(x0)); // Used for parameter count.
1667 DCHECK(function.Is(x1)); // Required by InvokeFunction. 1672 DCHECK(function.Is(x1)); // Required by InvokeFunction.
1668 DCHECK(ToRegister(instr->result()).Is(x0)); 1673 DCHECK(ToRegister(instr->result()).Is(x0));
1669 DCHECK(instr->IsMarkedAsCall()); 1674 DCHECK(instr->IsMarkedAsCall());
1670 1675
1671 // Copy the arguments to this function possibly from the 1676 // Copy the arguments to this function possibly from the
1672 // adaptor frame below it. 1677 // adaptor frame below it.
1673 const uint32_t kArgumentsLimit = 1 * KB; 1678 const uint32_t kArgumentsLimit = 1 * KB;
1674 __ Cmp(length, kArgumentsLimit); 1679 __ Cmp(length, kArgumentsLimit);
1675 DeoptimizeIf(hi, instr, "too many arguments"); 1680 DeoptimizeIf(hi, instr, Deoptimizer::kTooManyArguments);
1676 1681
1677 // Push the receiver and use the register to keep the original 1682 // Push the receiver and use the register to keep the original
1678 // number of arguments. 1683 // number of arguments.
1679 __ Push(receiver); 1684 __ Push(receiver);
1680 Register argc = receiver; 1685 Register argc = receiver;
1681 receiver = NoReg; 1686 receiver = NoReg;
1682 __ Sxtw(argc, length); 1687 __ Sxtw(argc, length);
1683 // The arguments are at a one pointer size offset from elements. 1688 // The arguments are at a one pointer size offset from elements.
1684 __ Add(elements, elements, 1 * kPointerSize); 1689 __ Add(elements, elements, 1 * kPointerSize);
1685 1690
(...skipping 161 matching lines...) Expand 10 before | Expand all | Expand 10 after
1847 __ Cmp(length, index); 1852 __ Cmp(length, index);
1848 cond = CommuteCondition(cond); 1853 cond = CommuteCondition(cond);
1849 } else { 1854 } else {
1850 Register index = ToRegister32(instr->index()); 1855 Register index = ToRegister32(instr->index());
1851 Operand length = ToOperand32(instr->length()); 1856 Operand length = ToOperand32(instr->length());
1852 __ Cmp(index, length); 1857 __ Cmp(index, length);
1853 } 1858 }
1854 if (FLAG_debug_code && instr->hydrogen()->skip_check()) { 1859 if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
1855 __ Assert(NegateCondition(cond), kEliminatedBoundsCheckFailed); 1860 __ Assert(NegateCondition(cond), kEliminatedBoundsCheckFailed);
1856 } else { 1861 } else {
1857 DeoptimizeIf(cond, instr, "out of bounds"); 1862 DeoptimizeIf(cond, instr, Deoptimizer::kOutOfBounds);
1858 } 1863 }
1859 } 1864 }
1860 1865
1861 1866
1862 void LCodeGen::DoBranch(LBranch* instr) { 1867 void LCodeGen::DoBranch(LBranch* instr) {
1863 Representation r = instr->hydrogen()->value()->representation(); 1868 Representation r = instr->hydrogen()->value()->representation();
1864 Label* true_label = instr->TrueLabel(chunk_); 1869 Label* true_label = instr->TrueLabel(chunk_);
1865 Label* false_label = instr->FalseLabel(chunk_); 1870 Label* false_label = instr->FalseLabel(chunk_);
1866 1871
1867 if (r.IsInteger32()) { 1872 if (r.IsInteger32()) {
(...skipping 58 matching lines...) Expand 10 before | Expand all | Expand 10 after
1926 value, Heap::kNullValueRootIndex, false_label); 1931 value, Heap::kNullValueRootIndex, false_label);
1927 } 1932 }
1928 1933
1929 if (expected.Contains(ToBooleanStub::SMI)) { 1934 if (expected.Contains(ToBooleanStub::SMI)) {
1930 // Smis: 0 -> false, all other -> true. 1935 // Smis: 0 -> false, all other -> true.
1931 DCHECK(Smi::FromInt(0) == 0); 1936 DCHECK(Smi::FromInt(0) == 0);
1932 __ Cbz(value, false_label); 1937 __ Cbz(value, false_label);
1933 __ JumpIfSmi(value, true_label); 1938 __ JumpIfSmi(value, true_label);
1934 } else if (expected.NeedsMap()) { 1939 } else if (expected.NeedsMap()) {
1935 // If we need a map later and have a smi, deopt. 1940 // If we need a map later and have a smi, deopt.
1936 DeoptimizeIfSmi(value, instr, "Smi"); 1941 DeoptimizeIfSmi(value, instr, Deoptimizer::kSmi);
1937 } 1942 }
1938 1943
1939 Register map = NoReg; 1944 Register map = NoReg;
1940 Register scratch = NoReg; 1945 Register scratch = NoReg;
1941 1946
1942 if (expected.NeedsMap()) { 1947 if (expected.NeedsMap()) {
1943 DCHECK((instr->temp1() != NULL) && (instr->temp2() != NULL)); 1948 DCHECK((instr->temp1() != NULL) && (instr->temp2() != NULL));
1944 map = ToRegister(instr->temp1()); 1949 map = ToRegister(instr->temp1());
1945 scratch = ToRegister(instr->temp2()); 1950 scratch = ToRegister(instr->temp2());
1946 1951
(...skipping 40 matching lines...) Expand 10 before | Expand all | Expand 10 after
1987 // If we got a NaN (overflow bit is set), jump to the false branch. 1992 // If we got a NaN (overflow bit is set), jump to the false branch.
1988 __ B(vs, false_label); 1993 __ B(vs, false_label);
1989 __ B(eq, false_label); 1994 __ B(eq, false_label);
1990 __ B(true_label); 1995 __ B(true_label);
1991 __ Bind(&not_heap_number); 1996 __ Bind(&not_heap_number);
1992 } 1997 }
1993 1998
1994 if (!expected.IsGeneric()) { 1999 if (!expected.IsGeneric()) {
1995 // We've seen something for the first time -> deopt. 2000 // We've seen something for the first time -> deopt.
1996 // This can only happen if we are not generic already. 2001 // This can only happen if we are not generic already.
1997 Deoptimize(instr, "unexpected object"); 2002 Deoptimize(instr, Deoptimizer::kUnexpectedObject);
1998 } 2003 }
1999 } 2004 }
2000 } 2005 }
2001 } 2006 }
2002 2007
2003 2008
2004 void LCodeGen::CallKnownFunction(Handle<JSFunction> function, 2009 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
2005 int formal_parameter_count, int arity, 2010 int formal_parameter_count, int arity,
2006 LInstruction* instr) { 2011 LInstruction* instr) {
2007 bool dont_adapt_arguments = 2012 bool dont_adapt_arguments =
(...skipping 185 matching lines...) Expand 10 before | Expand all | Expand 10 after
2193 Register temp = ToRegister(instr->temp()); 2198 Register temp = ToRegister(instr->temp());
2194 { 2199 {
2195 PushSafepointRegistersScope scope(this); 2200 PushSafepointRegistersScope scope(this);
2196 __ Push(object); 2201 __ Push(object);
2197 __ Mov(cp, 0); 2202 __ Mov(cp, 0);
2198 __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance); 2203 __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
2199 RecordSafepointWithRegisters( 2204 RecordSafepointWithRegisters(
2200 instr->pointer_map(), 1, Safepoint::kNoLazyDeopt); 2205 instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
2201 __ StoreToSafepointRegisterSlot(x0, temp); 2206 __ StoreToSafepointRegisterSlot(x0, temp);
2202 } 2207 }
2203 DeoptimizeIfSmi(temp, instr, "instance migration failed"); 2208 DeoptimizeIfSmi(temp, instr, Deoptimizer::kInstanceMigrationFailed);
2204 } 2209 }
2205 2210
2206 2211
2207 void LCodeGen::DoCheckMaps(LCheckMaps* instr) { 2212 void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
2208 class DeferredCheckMaps: public LDeferredCode { 2213 class DeferredCheckMaps: public LDeferredCode {
2209 public: 2214 public:
2210 DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object) 2215 DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
2211 : LDeferredCode(codegen), instr_(instr), object_(object) { 2216 : LDeferredCode(codegen), instr_(instr), object_(object) {
2212 SetExit(check_maps()); 2217 SetExit(check_maps());
2213 } 2218 }
(...skipping 34 matching lines...) Expand 10 before | Expand all | Expand 10 after
2248 __ CompareMap(map_reg, map); 2253 __ CompareMap(map_reg, map);
2249 __ B(eq, &success); 2254 __ B(eq, &success);
2250 } 2255 }
2251 Handle<Map> map = maps->at(maps->size() - 1).handle(); 2256 Handle<Map> map = maps->at(maps->size() - 1).handle();
2252 __ CompareMap(map_reg, map); 2257 __ CompareMap(map_reg, map);
2253 2258
2254 // We didn't match a map. 2259 // We didn't match a map.
2255 if (instr->hydrogen()->HasMigrationTarget()) { 2260 if (instr->hydrogen()->HasMigrationTarget()) {
2256 __ B(ne, deferred->entry()); 2261 __ B(ne, deferred->entry());
2257 } else { 2262 } else {
2258 DeoptimizeIf(ne, instr, "wrong map"); 2263 DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
2259 } 2264 }
2260 2265
2261 __ Bind(&success); 2266 __ Bind(&success);
2262 } 2267 }
2263 2268
2264 2269
2265 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) { 2270 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
2266 if (!instr->hydrogen()->value()->type().IsHeapObject()) { 2271 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
2267 DeoptimizeIfSmi(ToRegister(instr->value()), instr, "Smi"); 2272 DeoptimizeIfSmi(ToRegister(instr->value()), instr, Deoptimizer::kSmi);
2268 } 2273 }
2269 } 2274 }
2270 2275
2271 2276
2272 void LCodeGen::DoCheckSmi(LCheckSmi* instr) { 2277 void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
2273 Register value = ToRegister(instr->value()); 2278 Register value = ToRegister(instr->value());
2274 DCHECK(!instr->result() || ToRegister(instr->result()).Is(value)); 2279 DCHECK(!instr->result() || ToRegister(instr->result()).Is(value));
2275 DeoptimizeIfNotSmi(value, instr, "not a Smi"); 2280 DeoptimizeIfNotSmi(value, instr, Deoptimizer::kNotASmi);
2276 } 2281 }
2277 2282
2278 2283
2279 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) { 2284 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
2280 Register input = ToRegister(instr->value()); 2285 Register input = ToRegister(instr->value());
2281 Register scratch = ToRegister(instr->temp()); 2286 Register scratch = ToRegister(instr->temp());
2282 2287
2283 __ Ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); 2288 __ Ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
2284 __ Ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); 2289 __ Ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
2285 2290
2286 if (instr->hydrogen()->is_interval_check()) { 2291 if (instr->hydrogen()->is_interval_check()) {
2287 InstanceType first, last; 2292 InstanceType first, last;
2288 instr->hydrogen()->GetCheckInterval(&first, &last); 2293 instr->hydrogen()->GetCheckInterval(&first, &last);
2289 2294
2290 __ Cmp(scratch, first); 2295 __ Cmp(scratch, first);
2291 if (first == last) { 2296 if (first == last) {
2292 // If there is only one type in the interval check for equality. 2297 // If there is only one type in the interval check for equality.
2293 DeoptimizeIf(ne, instr, "wrong instance type"); 2298 DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType);
2294 } else if (last == LAST_TYPE) { 2299 } else if (last == LAST_TYPE) {
2295 // We don't need to compare with the higher bound of the interval. 2300 // We don't need to compare with the higher bound of the interval.
2296 DeoptimizeIf(lo, instr, "wrong instance type"); 2301 DeoptimizeIf(lo, instr, Deoptimizer::kWrongInstanceType);
2297 } else { 2302 } else {
2298 // If we are below the lower bound, set the C flag and clear the Z flag 2303 // If we are below the lower bound, set the C flag and clear the Z flag
2299 // to force a deopt. 2304 // to force a deopt.
2300 __ Ccmp(scratch, last, CFlag, hs); 2305 __ Ccmp(scratch, last, CFlag, hs);
2301 DeoptimizeIf(hi, instr, "wrong instance type"); 2306 DeoptimizeIf(hi, instr, Deoptimizer::kWrongInstanceType);
2302 } 2307 }
2303 } else { 2308 } else {
2304 uint8_t mask; 2309 uint8_t mask;
2305 uint8_t tag; 2310 uint8_t tag;
2306 instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag); 2311 instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
2307 2312
2308 if (base::bits::IsPowerOfTwo32(mask)) { 2313 if (base::bits::IsPowerOfTwo32(mask)) {
2309 DCHECK((tag == 0) || (tag == mask)); 2314 DCHECK((tag == 0) || (tag == mask));
2310 if (tag == 0) { 2315 if (tag == 0) {
2311 DeoptimizeIfBitSet(scratch, MaskToBit(mask), instr, 2316 DeoptimizeIfBitSet(scratch, MaskToBit(mask), instr,
2312 "wrong instance type"); 2317 Deoptimizer::kWrongInstanceType);
2313 } else { 2318 } else {
2314 DeoptimizeIfBitClear(scratch, MaskToBit(mask), instr, 2319 DeoptimizeIfBitClear(scratch, MaskToBit(mask), instr,
2315 "wrong instance type"); 2320 Deoptimizer::kWrongInstanceType);
2316 } 2321 }
2317 } else { 2322 } else {
2318 if (tag == 0) { 2323 if (tag == 0) {
2319 __ Tst(scratch, mask); 2324 __ Tst(scratch, mask);
2320 } else { 2325 } else {
2321 __ And(scratch, scratch, mask); 2326 __ And(scratch, scratch, mask);
2322 __ Cmp(scratch, tag); 2327 __ Cmp(scratch, tag);
2323 } 2328 }
2324 DeoptimizeIf(ne, instr, "wrong instance type"); 2329 DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType);
2325 } 2330 }
2326 } 2331 }
2327 } 2332 }
2328 2333
2329 2334
2330 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { 2335 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
2331 DoubleRegister input = ToDoubleRegister(instr->unclamped()); 2336 DoubleRegister input = ToDoubleRegister(instr->unclamped());
2332 Register result = ToRegister32(instr->result()); 2337 Register result = ToRegister32(instr->result());
2333 __ ClampDoubleToUint8(result, input, double_scratch()); 2338 __ ClampDoubleToUint8(result, input, double_scratch());
2334 } 2339 }
(...skipping 19 matching lines...) Expand all
2354 __ B(&done); 2359 __ B(&done);
2355 2360
2356 __ Bind(&is_not_smi); 2361 __ Bind(&is_not_smi);
2357 2362
2358 // Check for heap number. 2363 // Check for heap number.
2359 Label is_heap_number; 2364 Label is_heap_number;
2360 __ JumpIfHeapNumber(input, &is_heap_number); 2365 __ JumpIfHeapNumber(input, &is_heap_number);
2361 2366
2362 // Check for undefined. Undefined is coverted to zero for clamping conversion. 2367 // Check for undefined. Undefined is coverted to zero for clamping conversion.
2363 DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr, 2368 DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr,
2364 "not a heap number/undefined"); 2369 Deoptimizer::kNotAHeapNumberUndefined);
2365 __ Mov(result, 0); 2370 __ Mov(result, 0);
2366 __ B(&done); 2371 __ B(&done);
2367 2372
2368 // Heap number case. 2373 // Heap number case.
2369 __ Bind(&is_heap_number); 2374 __ Bind(&is_heap_number);
2370 DoubleRegister dbl_scratch = double_scratch(); 2375 DoubleRegister dbl_scratch = double_scratch();
2371 DoubleRegister dbl_scratch2 = ToDoubleRegister(instr->temp1()); 2376 DoubleRegister dbl_scratch2 = ToDoubleRegister(instr->temp1());
2372 __ Ldr(dbl_scratch, FieldMemOperand(input, HeapNumber::kValueOffset)); 2377 __ Ldr(dbl_scratch, FieldMemOperand(input, HeapNumber::kValueOffset));
2373 __ ClampDoubleToUint8(result, dbl_scratch, dbl_scratch2); 2378 __ ClampDoubleToUint8(result, dbl_scratch, dbl_scratch2);
2374 2379
(...skipping 284 matching lines...) Expand 10 before | Expand all | Expand 10 after
2659 if (isolate()->heap()->InNewSpace(*object)) { 2664 if (isolate()->heap()->InNewSpace(*object)) {
2660 UseScratchRegisterScope temps(masm()); 2665 UseScratchRegisterScope temps(masm());
2661 Register temp = temps.AcquireX(); 2666 Register temp = temps.AcquireX();
2662 Handle<Cell> cell = isolate()->factory()->NewCell(object); 2667 Handle<Cell> cell = isolate()->factory()->NewCell(object);
2663 __ Mov(temp, Operand(Handle<Object>(cell))); 2668 __ Mov(temp, Operand(Handle<Object>(cell)));
2664 __ Ldr(temp, FieldMemOperand(temp, Cell::kValueOffset)); 2669 __ Ldr(temp, FieldMemOperand(temp, Cell::kValueOffset));
2665 __ Cmp(reg, temp); 2670 __ Cmp(reg, temp);
2666 } else { 2671 } else {
2667 __ Cmp(reg, Operand(object)); 2672 __ Cmp(reg, Operand(object));
2668 } 2673 }
2669 DeoptimizeIf(ne, instr, "value mismatch"); 2674 DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch);
2670 } 2675 }
2671 2676
2672 2677
2673 void LCodeGen::DoLazyBailout(LLazyBailout* instr) { 2678 void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
2674 last_lazy_deopt_pc_ = masm()->pc_offset(); 2679 last_lazy_deopt_pc_ = masm()->pc_offset();
2675 DCHECK(instr->HasEnvironment()); 2680 DCHECK(instr->HasEnvironment());
2676 LEnvironment* env = instr->environment(); 2681 LEnvironment* env = instr->environment();
2677 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt); 2682 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
2678 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index()); 2683 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
2679 } 2684 }
2680 2685
2681 2686
2682 void LCodeGen::DoDateField(LDateField* instr) { 2687 void LCodeGen::DoDateField(LDateField* instr) {
2683 Register object = ToRegister(instr->date()); 2688 Register object = ToRegister(instr->date());
2684 Register result = ToRegister(instr->result()); 2689 Register result = ToRegister(instr->result());
2685 Register temp1 = x10; 2690 Register temp1 = x10;
2686 Register temp2 = x11; 2691 Register temp2 = x11;
2687 Smi* index = instr->index(); 2692 Smi* index = instr->index();
2688 Label runtime, done; 2693 Label runtime, done;
2689 2694
2690 DCHECK(object.is(result) && object.Is(x0)); 2695 DCHECK(object.is(result) && object.Is(x0));
2691 DCHECK(instr->IsMarkedAsCall()); 2696 DCHECK(instr->IsMarkedAsCall());
2692 2697
2693 DeoptimizeIfSmi(object, instr, "Smi"); 2698 DeoptimizeIfSmi(object, instr, Deoptimizer::kSmi);
2694 __ CompareObjectType(object, temp1, temp1, JS_DATE_TYPE); 2699 __ CompareObjectType(object, temp1, temp1, JS_DATE_TYPE);
2695 DeoptimizeIf(ne, instr, "not a date object"); 2700 DeoptimizeIf(ne, instr, Deoptimizer::kNotADateObject);
2696 2701
2697 if (index->value() == 0) { 2702 if (index->value() == 0) {
2698 __ Ldr(result, FieldMemOperand(object, JSDate::kValueOffset)); 2703 __ Ldr(result, FieldMemOperand(object, JSDate::kValueOffset));
2699 } else { 2704 } else {
2700 if (index->value() < JSDate::kFirstUncachedField) { 2705 if (index->value() < JSDate::kFirstUncachedField) {
2701 ExternalReference stamp = ExternalReference::date_cache_stamp(isolate()); 2706 ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
2702 __ Mov(temp1, Operand(stamp)); 2707 __ Mov(temp1, Operand(stamp));
2703 __ Ldr(temp1, MemOperand(temp1)); 2708 __ Ldr(temp1, MemOperand(temp1));
2704 __ Ldr(temp2, FieldMemOperand(object, JSDate::kCacheStampOffset)); 2709 __ Ldr(temp2, FieldMemOperand(object, JSDate::kCacheStampOffset));
2705 __ Cmp(temp1, temp2); 2710 __ Cmp(temp1, temp2);
(...skipping 29 matching lines...) Expand all
2735 void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) { 2740 void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
2736 Register dividend = ToRegister32(instr->dividend()); 2741 Register dividend = ToRegister32(instr->dividend());
2737 int32_t divisor = instr->divisor(); 2742 int32_t divisor = instr->divisor();
2738 Register result = ToRegister32(instr->result()); 2743 Register result = ToRegister32(instr->result());
2739 DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor))); 2744 DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
2740 DCHECK(!result.is(dividend)); 2745 DCHECK(!result.is(dividend));
2741 2746
2742 // Check for (0 / -x) that will produce negative zero. 2747 // Check for (0 / -x) that will produce negative zero.
2743 HDiv* hdiv = instr->hydrogen(); 2748 HDiv* hdiv = instr->hydrogen();
2744 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { 2749 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
2745 DeoptimizeIfZero(dividend, instr, "division by zero"); 2750 DeoptimizeIfZero(dividend, instr, Deoptimizer::kDivisionByZero);
2746 } 2751 }
2747 // Check for (kMinInt / -1). 2752 // Check for (kMinInt / -1).
2748 if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) { 2753 if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
2749 // Test dividend for kMinInt by subtracting one (cmp) and checking for 2754 // Test dividend for kMinInt by subtracting one (cmp) and checking for
2750 // overflow. 2755 // overflow.
2751 __ Cmp(dividend, 1); 2756 __ Cmp(dividend, 1);
2752 DeoptimizeIf(vs, instr, "overflow"); 2757 DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
2753 } 2758 }
2754 // Deoptimize if remainder will not be 0. 2759 // Deoptimize if remainder will not be 0.
2755 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && 2760 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
2756 divisor != 1 && divisor != -1) { 2761 divisor != 1 && divisor != -1) {
2757 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1); 2762 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
2758 __ Tst(dividend, mask); 2763 __ Tst(dividend, mask);
2759 DeoptimizeIf(ne, instr, "lost precision"); 2764 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision);
2760 } 2765 }
2761 2766
2762 if (divisor == -1) { // Nice shortcut, not needed for correctness. 2767 if (divisor == -1) { // Nice shortcut, not needed for correctness.
2763 __ Neg(result, dividend); 2768 __ Neg(result, dividend);
2764 return; 2769 return;
2765 } 2770 }
2766 int32_t shift = WhichPowerOf2Abs(divisor); 2771 int32_t shift = WhichPowerOf2Abs(divisor);
2767 if (shift == 0) { 2772 if (shift == 0) {
2768 __ Mov(result, dividend); 2773 __ Mov(result, dividend);
2769 } else if (shift == 1) { 2774 } else if (shift == 1) {
2770 __ Add(result, dividend, Operand(dividend, LSR, 31)); 2775 __ Add(result, dividend, Operand(dividend, LSR, 31));
2771 } else { 2776 } else {
2772 __ Mov(result, Operand(dividend, ASR, 31)); 2777 __ Mov(result, Operand(dividend, ASR, 31));
2773 __ Add(result, dividend, Operand(result, LSR, 32 - shift)); 2778 __ Add(result, dividend, Operand(result, LSR, 32 - shift));
2774 } 2779 }
2775 if (shift > 0) __ Mov(result, Operand(result, ASR, shift)); 2780 if (shift > 0) __ Mov(result, Operand(result, ASR, shift));
2776 if (divisor < 0) __ Neg(result, result); 2781 if (divisor < 0) __ Neg(result, result);
2777 } 2782 }
2778 2783
2779 2784
// Emits code for integer division by an arbitrary non-power-of-two constant,
// using multiply-by-reciprocal (TruncatingDiv). Deopts on a zero divisor,
// a negative-zero result, and (when uses do not truncate) inexact division.
void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
  Register dividend = ToRegister32(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister32(instr->result());
  DCHECK(!AreAliased(dividend, result));

  if (divisor == 0) {
    // Constant zero divisor: unconditionally deopt; no code to emit.
    Deoptimize(instr, Deoptimizer::kDivisionByZero);
    return;
  }

  // Check for (0 / -x) that will produce negative zero.
  HDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    DeoptimizeIfZero(dividend, instr, Deoptimizer::kMinusZero);
  }

  // Divide by |divisor| via reciprocal multiplication, then fix the sign.
  __ TruncatingDiv(result, dividend, Abs(divisor));
  if (divisor < 0) __ Neg(result, result);

  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
    // Recompute the remainder (temp = dividend - result * divisor) and deopt
    // if it is non-zero, i.e. the division was inexact.
    Register temp = ToRegister32(instr->temp());
    DCHECK(!AreAliased(dividend, result, temp));
    __ Sxtw(dividend.X(), dividend);
    __ Mov(temp, divisor);
    __ Smsubl(temp.X(), result, temp, dividend.X());
    DeoptimizeIfNotZero(temp, instr, Deoptimizer::kLostPrecision);
  }
}
2809 2814
2810 2815
// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
// Emits code for integer division with a register divisor. Issues Sdiv first,
// then emits the deopt checks (div-by-zero, minus zero, kMinInt/-1 overflow,
// inexact result) so they overlap with the division latency.
void LCodeGen::DoDivI(LDivI* instr) {
  HBinaryOperation* hdiv = instr->hydrogen();
  Register dividend = ToRegister32(instr->dividend());
  Register divisor = ToRegister32(instr->divisor());
  Register result = ToRegister32(instr->result());

  // Issue the division first, and then check for any deopt cases whilst the
  // result is computed.
  __ Sdiv(result, dividend, divisor);

  if (hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
    // Truncating uses accept Sdiv's hardware semantics (x/0 == 0,
    // kMinInt/-1 == kMinInt), so no checks are needed.
    DCHECK(!instr->temp());
    return;
  }

  // Check for x / 0.
  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
    DeoptimizeIfZero(divisor, instr, Deoptimizer::kDivisionByZero);
  }

  // Check for (0 / -x) as that will produce negative zero.
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
    __ Cmp(divisor, 0);

    // If the divisor < 0 (mi), compare the dividend, and deopt if it is
    // zero, ie. zero dividend with negative divisor deopts.
    // If the divisor >= 0 (pl, the opposite of mi) set the flags to
    // condition ne, so we don't deopt, ie. positive divisor doesn't deopt.
    __ Ccmp(dividend, 0, NoFlag, mi);
    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
  }

  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow)) {
    // Test dividend for kMinInt by subtracting one (cmp) and checking for
    // overflow.
    __ Cmp(dividend, 1);
    // If overflow is set, ie. dividend = kMinInt, compare the divisor with
    // -1. If overflow is clear, set the flags for condition ne, as the
    // dividend isn't -1, and thus we shouldn't deopt.
    __ Ccmp(divisor, -1, NoFlag, vs);
    DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
  }

  // Compute remainder and deopt if it's not zero.
  Register remainder = ToRegister32(instr->temp());
  __ Msub(remainder, result, divisor, dividend);
  DeoptimizeIfNotZero(remainder, instr, Deoptimizer::kLostPrecision);
}
2861 2866
2862 2867
// Converts a double to an int32 (optionally Smi-tagging the result).
// Deopts on minus zero (when the hydrogen value bails out on it) and on
// values that cannot be represented exactly as int32, including NaN.
void LCodeGen::DoDoubleToIntOrSmi(LDoubleToIntOrSmi* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  Register result = ToRegister32(instr->result());

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    DeoptimizeIfMinusZero(input, instr, Deoptimizer::kMinusZero);
  }

  // Sets flags ne if the conversion is lossy (fractional part, out of
  // int32 range, or NaN).
  __ TryRepresentDoubleAsInt32(result, input, double_scratch());
  DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);

  if (instr->tag_result()) {
    __ SmiTag(result.X());
  }
}
2878 2883
2879 2884
// Drops instr->count() slots from the top of the stack.
void LCodeGen::DoDrop(LDrop* instr) {
  __ Drop(instr->count());
}
(...skipping 40 matching lines...) Expand 10 before | Expand all | Expand 10 after
2923 __ EnumLengthUntagged(result, map); 2928 __ EnumLengthUntagged(result, map);
2924 __ Cbnz(result, &load_cache); 2929 __ Cbnz(result, &load_cache);
2925 2930
2926 __ Mov(result, Operand(isolate()->factory()->empty_fixed_array())); 2931 __ Mov(result, Operand(isolate()->factory()->empty_fixed_array()));
2927 __ B(&done); 2932 __ B(&done);
2928 2933
2929 __ Bind(&load_cache); 2934 __ Bind(&load_cache);
2930 __ LoadInstanceDescriptors(map, result); 2935 __ LoadInstanceDescriptors(map, result);
2931 __ Ldr(result, FieldMemOperand(result, DescriptorArray::kEnumCacheOffset)); 2936 __ Ldr(result, FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
2932 __ Ldr(result, FieldMemOperand(result, FixedArray::SizeFor(instr->idx()))); 2937 __ Ldr(result, FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
2933 DeoptimizeIfZero(result, instr, "no cache"); 2938 DeoptimizeIfZero(result, instr, Deoptimizer::kNoCache);
2934 2939
2935 __ Bind(&done); 2940 __ Bind(&done);
2936 } 2941 }
2937 2942
2938 2943
// Prepares the map for a for-in loop over |object| (expected in x0).
// Deopts if the object is undefined, null, a Smi, or not a JavaScript
// object. Uses the enum cache when available; otherwise falls back to the
// runtime and verifies that the runtime returned a map (meta-map check).
void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
  Register object = ToRegister(instr->object());
  Register null_value = x5;

  DCHECK(instr->IsMarkedAsCall());
  DCHECK(object.Is(x0));

  DeoptimizeIfRoot(object, Heap::kUndefinedValueRootIndex, instr,
                   Deoptimizer::kUndefined);

  __ LoadRoot(null_value, Heap::kNullValueRootIndex);
  __ Cmp(object, null_value);
  DeoptimizeIf(eq, instr, Deoptimizer::kNull);

  DeoptimizeIfSmi(object, instr, Deoptimizer::kSmi);

  // Proxies (and anything below LAST_JS_PROXY_TYPE) are not plain JS
  // objects and must take the deopt path.
  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
  __ CompareObjectType(object, x1, x1, LAST_JS_PROXY_TYPE);
  DeoptimizeIf(le, instr, Deoptimizer::kNotAJavaScriptObject);

  Label use_cache, call_runtime;
  __ CheckEnumCache(object, null_value, x1, x2, x3, x4, &call_runtime);

  __ Ldr(object, FieldMemOperand(object, HeapObject::kMapOffset));
  __ B(&use_cache);

  // Get the set of properties to enumerate.
  __ Bind(&call_runtime);
  __ Push(object);
  CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);

  // The runtime may return either a map (use the cache) or a fixed array.
  // Deopt unless the result's map is the meta map, i.e. the result is a map.
  __ Ldr(x1, FieldMemOperand(object, HeapObject::kMapOffset));
  DeoptimizeIfNotRoot(x1, Heap::kMetaMapRootIndex, instr,
                      Deoptimizer::kWrongMap);

  __ Bind(&use_cache);
}
2974 2981
2975 2982
2976 void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) { 2983 void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
2977 Register input = ToRegister(instr->value()); 2984 Register input = ToRegister(instr->value());
2978 Register result = ToRegister(instr->result()); 2985 Register result = ToRegister(instr->result());
2979 2986
2980 __ AssertString(input); 2987 __ AssertString(input);
(...skipping 370 matching lines...) Expand 10 before | Expand all | Expand 10 after
3351 DoGap(label); 3358 DoGap(label);
3352 } 3359 }
3353 3360
3354 3361
// Loads a slot from a context object. If a hole check is required, either
// deopts on the hole (for let/const-style bindings) or silently replaces
// the hole with undefined, depending on the hydrogen instruction.
void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
  Register context = ToRegister(instr->context());
  Register result = ToRegister(instr->result());
  __ Ldr(result, ContextMemOperand(context, instr->slot_index()));
  if (instr->hydrogen()->RequiresHoleCheck()) {
    if (instr->hydrogen()->DeoptimizesOnHole()) {
      DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr,
                       Deoptimizer::kHole);
    } else {
      // Non-deopting variant: map the hole to undefined.
      Label not_the_hole;
      __ JumpIfNotRoot(result, Heap::kTheHoleValueRootIndex, &not_the_hole);
      __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
      __ Bind(&not_the_hole);
    }
  }
}
3370 3378
3371 3379
// Loads the prototype of a JSFunction. The function's
// kPrototypeOrInitialMapOffset field holds either the prototype directly or
// an initial map (from which the prototype is then read). Deopts if the
// field holds the hole, i.e. the function has neither.
void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
  Register function = ToRegister(instr->function());
  Register result = ToRegister(instr->result());
  Register temp = ToRegister(instr->temp());

  // Get the prototype or initial map from the function.
  __ Ldr(result, FieldMemOperand(function,
                                 JSFunction::kPrototypeOrInitialMapOffset));

  // Check that the function has a prototype or an initial map.
  DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr,
                   Deoptimizer::kHole);

  // If the function does not have an initial map, we're done.
  Label done;
  __ CompareObjectType(result, temp, temp, MAP_TYPE);
  __ B(ne, &done);

  // Get the prototype from the initial map.
  __ Ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));

  // All done.
  __ Bind(&done);
}
3395 3404
3396 3405
// Loads the value stored in a global property cell. Deopts on the hole
// value when the hydrogen instruction requires a hole check.
void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
  Register result = ToRegister(instr->result());
  __ Mov(result, Operand(Handle<Object>(instr->hydrogen()->cell().handle())));
  __ Ldr(result, FieldMemOperand(result, Cell::kValueOffset));
  if (instr->hydrogen()->RequiresHoleCheck()) {
    DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr,
                     Deoptimizer::kHole);
  }
}
3405 3415
3406 3416
3407 template <class T> 3417 template <class T>
3408 void LCodeGen::EmitVectorLoadICRegisters(T* instr) { 3418 void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
3409 DCHECK(FLAG_vector_ics); 3419 DCHECK(FLAG_vector_ics);
3410 Register vector_register = ToRegister(instr->temp_vector()); 3420 Register vector_register = ToRegister(instr->temp_vector());
3411 Register slot_register = VectorLoadICDescriptor::SlotRegister(); 3421 Register slot_register = VectorLoadICDescriptor::SlotRegister();
3412 DCHECK(vector_register.is(VectorLoadICDescriptor::VectorRegister())); 3422 DCHECK(vector_register.is(VectorLoadICDescriptor::VectorRegister()));
(...skipping 115 matching lines...) Expand 10 before | Expand all | Expand 10 after
3528 case EXTERNAL_INT32_ELEMENTS: 3538 case EXTERNAL_INT32_ELEMENTS:
3529 case INT32_ELEMENTS: 3539 case INT32_ELEMENTS:
3530 __ Ldrsw(result, mem_op); 3540 __ Ldrsw(result, mem_op);
3531 break; 3541 break;
3532 case EXTERNAL_UINT32_ELEMENTS: 3542 case EXTERNAL_UINT32_ELEMENTS:
3533 case UINT32_ELEMENTS: 3543 case UINT32_ELEMENTS:
3534 __ Ldr(result.W(), mem_op); 3544 __ Ldr(result.W(), mem_op);
3535 if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) { 3545 if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
3536 // Deopt if value > 0x80000000. 3546 // Deopt if value > 0x80000000.
3537 __ Tst(result, 0xFFFFFFFF80000000); 3547 __ Tst(result, 0xFFFFFFFF80000000);
3538 DeoptimizeIf(ne, instr, "negative value"); 3548 DeoptimizeIf(ne, instr, Deoptimizer::kNegativeValue);
3539 } 3549 }
3540 break; 3550 break;
3541 case FLOAT32_ELEMENTS: 3551 case FLOAT32_ELEMENTS:
3542 case FLOAT64_ELEMENTS: 3552 case FLOAT64_ELEMENTS:
3543 case EXTERNAL_FLOAT32_ELEMENTS: 3553 case EXTERNAL_FLOAT32_ELEMENTS:
3544 case EXTERNAL_FLOAT64_ELEMENTS: 3554 case EXTERNAL_FLOAT64_ELEMENTS:
3545 case FAST_HOLEY_DOUBLE_ELEMENTS: 3555 case FAST_HOLEY_DOUBLE_ELEMENTS:
3546 case FAST_HOLEY_ELEMENTS: 3556 case FAST_HOLEY_ELEMENTS:
3547 case FAST_HOLEY_SMI_ELEMENTS: 3557 case FAST_HOLEY_SMI_ELEMENTS:
3548 case FAST_DOUBLE_ELEMENTS: 3558 case FAST_DOUBLE_ELEMENTS:
(...skipping 73 matching lines...) Expand 10 before | Expand all | Expand 10 after
3622 instr->hydrogen()->representation(), 3632 instr->hydrogen()->representation(),
3623 instr->base_offset()); 3633 instr->base_offset());
3624 } 3634 }
3625 3635
3626 __ Ldr(result, mem_op); 3636 __ Ldr(result, mem_op);
3627 3637
3628 if (instr->hydrogen()->RequiresHoleCheck()) { 3638 if (instr->hydrogen()->RequiresHoleCheck()) {
3629 Register scratch = ToRegister(instr->temp()); 3639 Register scratch = ToRegister(instr->temp());
3630 __ Fmov(scratch, result); 3640 __ Fmov(scratch, result);
3631 __ Eor(scratch, scratch, kHoleNanInt64); 3641 __ Eor(scratch, scratch, kHoleNanInt64);
3632 DeoptimizeIfZero(scratch, instr, "hole"); 3642 DeoptimizeIfZero(scratch, instr, Deoptimizer::kHole);
3633 } 3643 }
3634 } 3644 }
3635 3645
3636 3646
3637 void LCodeGen::DoLoadKeyedFixed(LLoadKeyedFixed* instr) { 3647 void LCodeGen::DoLoadKeyedFixed(LLoadKeyedFixed* instr) {
3638 Register elements = ToRegister(instr->elements()); 3648 Register elements = ToRegister(instr->elements());
3639 Register result = ToRegister(instr->result()); 3649 Register result = ToRegister(instr->result());
3640 MemOperand mem_op; 3650 MemOperand mem_op;
3641 3651
3642 Representation representation = instr->hydrogen()->representation(); 3652 Representation representation = instr->hydrogen()->representation();
(...skipping 17 matching lines...) Expand all
3660 3670
3661 mem_op = PrepareKeyedArrayOperand(load_base, elements, key, key_is_tagged, 3671 mem_op = PrepareKeyedArrayOperand(load_base, elements, key, key_is_tagged,
3662 instr->hydrogen()->elements_kind(), 3672 instr->hydrogen()->elements_kind(),
3663 representation, instr->base_offset()); 3673 representation, instr->base_offset());
3664 } 3674 }
3665 3675
3666 __ Load(result, mem_op, representation); 3676 __ Load(result, mem_op, representation);
3667 3677
3668 if (instr->hydrogen()->RequiresHoleCheck()) { 3678 if (instr->hydrogen()->RequiresHoleCheck()) {
3669 if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) { 3679 if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
3670 DeoptimizeIfNotSmi(result, instr, "not a Smi"); 3680 DeoptimizeIfNotSmi(result, instr, Deoptimizer::kNotASmi);
3671 } else { 3681 } else {
3672 DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr, "hole"); 3682 DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr,
3683 Deoptimizer::kHole);
3673 } 3684 }
3674 } 3685 }
3675 } 3686 }
3676 3687
3677 3688
3678 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) { 3689 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
3679 DCHECK(ToRegister(instr->context()).is(cp)); 3690 DCHECK(ToRegister(instr->context()).is(cp));
3680 DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister())); 3691 DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
3681 DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister())); 3692 DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
3682 if (FLAG_vector_ics) { 3693 if (FLAG_vector_ics) {
(...skipping 83 matching lines...) Expand 10 before | Expand all | Expand 10 after
3766 if (r.IsDouble()) { 3777 if (r.IsDouble()) {
3767 DoubleRegister input = ToDoubleRegister(instr->value()); 3778 DoubleRegister input = ToDoubleRegister(instr->value());
3768 DoubleRegister result = ToDoubleRegister(instr->result()); 3779 DoubleRegister result = ToDoubleRegister(instr->result());
3769 __ Fabs(result, input); 3780 __ Fabs(result, input);
3770 } else if (r.IsSmi() || r.IsInteger32()) { 3781 } else if (r.IsSmi() || r.IsInteger32()) {
3771 Register input = r.IsSmi() ? ToRegister(instr->value()) 3782 Register input = r.IsSmi() ? ToRegister(instr->value())
3772 : ToRegister32(instr->value()); 3783 : ToRegister32(instr->value());
3773 Register result = r.IsSmi() ? ToRegister(instr->result()) 3784 Register result = r.IsSmi() ? ToRegister(instr->result())
3774 : ToRegister32(instr->result()); 3785 : ToRegister32(instr->result());
3775 __ Abs(result, input); 3786 __ Abs(result, input);
3776 DeoptimizeIf(vs, instr, "overflow"); 3787 DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
3777 } 3788 }
3778 } 3789 }
3779 3790
3780 3791
3781 void LCodeGen::DoDeferredMathAbsTagged(LMathAbsTagged* instr, 3792 void LCodeGen::DoDeferredMathAbsTagged(LMathAbsTagged* instr,
3782 Label* exit, 3793 Label* exit,
3783 Label* allocation_entry) { 3794 Label* allocation_entry) {
3784 // Handle the tricky cases of MathAbsTagged: 3795 // Handle the tricky cases of MathAbsTagged:
3785 // - HeapNumber inputs. 3796 // - HeapNumber inputs.
3786 // - Negative inputs produce a positive result, so a new HeapNumber is 3797 // - Negative inputs produce a positive result, so a new HeapNumber is
(...skipping 131 matching lines...) Expand 10 before | Expand all | Expand 10 after
3918 3929
3919 __ Frintm(result, input); 3930 __ Frintm(result, input);
3920 } 3931 }
3921 3932
3922 3933
// Math.floor of a double producing an int32 result. Fcvtms converts
// rounding toward minus infinity into a 64-bit register; the tail checks
// verify the value fits in 32 bits and the input was not NaN.
void LCodeGen::DoMathFloorI(LMathFloorI* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  Register result = ToRegister(instr->result());

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // floor(-0.0) would be -0.0, which truncates to +0; deopt instead.
    DeoptimizeIfMinusZero(input, instr, Deoptimizer::kMinusZero);
  }

  __ Fcvtms(result, input);

  // Check that the result fits into a 32-bit integer.
  //  - The result did not overflow.
  __ Cmp(result, Operand(result, SXTW));
  //  - The input was not NaN.
  __ Fccmp(input, input, NoFlag, eq);
  DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
}
3940 3951
3941 3952
// Flooring (round toward minus infinity) division by a power-of-two
// constant. Positive divisors reduce to an arithmetic shift; negative
// divisors negate first and handle the minus-zero and kMinInt edge cases.
void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
  Register dividend = ToRegister32(instr->dividend());
  Register result = ToRegister32(instr->result());
  int32_t divisor = instr->divisor();

  // If the divisor is 1, return the dividend.
  if (divisor == 1) {
    __ Mov(result, dividend, kDiscardForSameWReg);
    return;
  }

  // If the divisor is positive, things are easy: There can be no deopts and we
  // can simply do an arithmetic right shift.
  int32_t shift = WhichPowerOf2Abs(divisor);
  if (divisor > 1) {
    __ Mov(result, Operand(dividend, ASR, shift));
    return;
  }

  // If the divisor is negative, we have to negate and handle edge cases.
  // Negs sets Z for a zero dividend and V for kMinInt negation overflow;
  // both flags are consumed by the checks below.
  __ Negs(result, dividend);
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // 0 / negative would be -0; deopt.
    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
  }

  // Dividing by -1 is basically negation, unless we overflow.
  if (divisor == -1) {
    if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
      DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
    }
    return;
  }

  // If the negation could not overflow, simply shifting is OK.
  if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
    __ Mov(result, Operand(dividend, ASR, shift));
    return;
  }

  // Asr does not touch the flags, so the V flag from Negs still indicates
  // whether the dividend was kMinInt; in that case select the precomputed
  // constant result instead of the shifted value.
  __ Asr(result, result, shift);
  __ Csel(result, result, kMinInt / divisor, vc);
}
3984 3995
3985 3996
3986 void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) { 3997 void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
3987 Register dividend = ToRegister32(instr->dividend()); 3998 Register dividend = ToRegister32(instr->dividend());
3988 int32_t divisor = instr->divisor(); 3999 int32_t divisor = instr->divisor();
3989 Register result = ToRegister32(instr->result()); 4000 Register result = ToRegister32(instr->result());
3990 DCHECK(!AreAliased(dividend, result)); 4001 DCHECK(!AreAliased(dividend, result));
3991 4002
3992 if (divisor == 0) { 4003 if (divisor == 0) {
3993 Deoptimize(instr, "division by zero"); 4004 Deoptimize(instr, Deoptimizer::kDivisionByZero);
3994 return; 4005 return;
3995 } 4006 }
3996 4007
3997 // Check for (0 / -x) that will produce negative zero. 4008 // Check for (0 / -x) that will produce negative zero.
3998 HMathFloorOfDiv* hdiv = instr->hydrogen(); 4009 HMathFloorOfDiv* hdiv = instr->hydrogen();
3999 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { 4010 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
4000 DeoptimizeIfZero(dividend, instr, "minus zero"); 4011 DeoptimizeIfZero(dividend, instr, Deoptimizer::kMinusZero);
4001 } 4012 }
4002 4013
4003 // Easy case: We need no dynamic check for the dividend and the flooring 4014 // Easy case: We need no dynamic check for the dividend and the flooring
4004 // division is the same as the truncating division. 4015 // division is the same as the truncating division.
4005 if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) || 4016 if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
4006 (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) { 4017 (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
4007 __ TruncatingDiv(result, dividend, Abs(divisor)); 4018 __ TruncatingDiv(result, dividend, Abs(divisor));
4008 if (divisor < 0) __ Neg(result, result); 4019 if (divisor < 0) __ Neg(result, result);
4009 return; 4020 return;
4010 } 4021 }
(...skipping 22 matching lines...) Expand all
4033 Register dividend = ToRegister32(instr->dividend()); 4044 Register dividend = ToRegister32(instr->dividend());
4034 Register divisor = ToRegister32(instr->divisor()); 4045 Register divisor = ToRegister32(instr->divisor());
4035 Register remainder = ToRegister32(instr->temp()); 4046 Register remainder = ToRegister32(instr->temp());
4036 Register result = ToRegister32(instr->result()); 4047 Register result = ToRegister32(instr->result());
4037 4048
4038 // This can't cause an exception on ARM, so we can speculatively 4049 // This can't cause an exception on ARM, so we can speculatively
4039 // execute it already now. 4050 // execute it already now.
4040 __ Sdiv(result, dividend, divisor); 4051 __ Sdiv(result, dividend, divisor);
4041 4052
4042 // Check for x / 0. 4053 // Check for x / 0.
4043 DeoptimizeIfZero(divisor, instr, "division by zero"); 4054 DeoptimizeIfZero(divisor, instr, Deoptimizer::kDivisionByZero);
4044 4055
4045 // Check for (kMinInt / -1). 4056 // Check for (kMinInt / -1).
4046 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) { 4057 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
4047 // The V flag will be set iff dividend == kMinInt. 4058 // The V flag will be set iff dividend == kMinInt.
4048 __ Cmp(dividend, 1); 4059 __ Cmp(dividend, 1);
4049 __ Ccmp(divisor, -1, NoFlag, vs); 4060 __ Ccmp(divisor, -1, NoFlag, vs);
4050 DeoptimizeIf(eq, instr, "overflow"); 4061 DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
4051 } 4062 }
4052 4063
4053 // Check for (0 / -x) that will produce negative zero. 4064 // Check for (0 / -x) that will produce negative zero.
4054 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 4065 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
4055 __ Cmp(divisor, 0); 4066 __ Cmp(divisor, 0);
4056 __ Ccmp(dividend, 0, ZFlag, mi); 4067 __ Ccmp(dividend, 0, ZFlag, mi);
4057 // "divisor" can't be null because the code would have already been 4068 // "divisor" can't be null because the code would have already been
4058 // deoptimized. The Z flag is set only if (divisor < 0) and (dividend == 0). 4069 // deoptimized. The Z flag is set only if (divisor < 0) and (dividend == 0).
4059 // In this case we need to deoptimize to produce a -0. 4070 // In this case we need to deoptimize to produce a -0.
4060 DeoptimizeIf(eq, instr, "minus zero"); 4071 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
4061 } 4072 }
4062 4073
4063 Label done; 4074 Label done;
4064 // If both operands have the same sign then we are done. 4075 // If both operands have the same sign then we are done.
4065 __ Eor(remainder, dividend, divisor); 4076 __ Eor(remainder, dividend, divisor);
4066 __ Tbz(remainder, kWSignBit, &done); 4077 __ Tbz(remainder, kWSignBit, &done);
4067 4078
4068 // Check if the result needs to be corrected. 4079 // Check if the result needs to be corrected.
4069 __ Msub(remainder, result, divisor, dividend); 4080 __ Msub(remainder, result, divisor, dividend);
4070 __ Cbz(remainder, &done); 4081 __ Cbz(remainder, &done);
(...skipping 138 matching lines...) Expand 10 before | Expand all | Expand 10 after
4209 // result fits in 32 bits. 4220 // result fits in 32 bits.
4210 __ Cmp(result, Operand(result.W(), SXTW)); 4221 __ Cmp(result, Operand(result.W(), SXTW));
4211 __ Ccmp(result, 1, ZFlag, eq); 4222 __ Ccmp(result, 1, ZFlag, eq);
4212 __ B(hi, &done); 4223 __ B(hi, &done);
4213 4224
4214 // At this point, we have to handle possible inputs of NaN or numbers in the 4225 // At this point, we have to handle possible inputs of NaN or numbers in the
4215 // range [-0.5, 1.5[, or numbers larger than 32 bits. 4226 // range [-0.5, 1.5[, or numbers larger than 32 bits.
4216 4227
4217 // Deoptimize if the result > 1, as it must be larger than 32 bits. 4228 // Deoptimize if the result > 1, as it must be larger than 32 bits.
4218 __ Cmp(result, 1); 4229 __ Cmp(result, 1);
4219 DeoptimizeIf(hi, instr, "overflow"); 4230 DeoptimizeIf(hi, instr, Deoptimizer::kOverflow);
4220 4231
4221 // Deoptimize for negative inputs, which at this point are only numbers in 4232 // Deoptimize for negative inputs, which at this point are only numbers in
4222 // the range [-0.5, -0.0] 4233 // the range [-0.5, -0.0]
4223 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 4234 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
4224 __ Fmov(result, input); 4235 __ Fmov(result, input);
4225 DeoptimizeIfNegative(result, instr, "minus zero"); 4236 DeoptimizeIfNegative(result, instr, Deoptimizer::kMinusZero);
4226 } 4237 }
4227 4238
4228 // Deoptimize if the input was NaN. 4239 // Deoptimize if the input was NaN.
4229 __ Fcmp(input, dot_five); 4240 __ Fcmp(input, dot_five);
4230 DeoptimizeIf(vs, instr, "NaN"); 4241 DeoptimizeIf(vs, instr, Deoptimizer::kNaN);
4231 4242
4232 // Now, the only unhandled inputs are in the range [0.0, 1.5[ (or [-0.5, 1.5[ 4243 // Now, the only unhandled inputs are in the range [0.0, 1.5[ (or [-0.5, 1.5[
4233 // if we didn't generate a -0.0 bailout). If input >= 0.5 then return 1, 4244 // if we didn't generate a -0.0 bailout). If input >= 0.5 then return 1,
4234 // else 0; we avoid dealing with 0.499...94 directly. 4245 // else 0; we avoid dealing with 0.499...94 directly.
4235 __ Cset(result, ge); 4246 __ Cset(result, ge);
4236 __ Bind(&done); 4247 __ Bind(&done);
4237 } 4248 }
4238 4249
4239 4250
4240 void LCodeGen::DoMathFround(LMathFround* instr) { 4251 void LCodeGen::DoMathFround(LMathFround* instr) {
(...skipping 57 matching lines...) Expand 10 before | Expand all | Expand 10 after
4298 HMod* hmod = instr->hydrogen(); 4309 HMod* hmod = instr->hydrogen();
4299 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1); 4310 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
4300 Label dividend_is_not_negative, done; 4311 Label dividend_is_not_negative, done;
4301 if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) { 4312 if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
4302 __ Tbz(dividend, kWSignBit, &dividend_is_not_negative); 4313 __ Tbz(dividend, kWSignBit, &dividend_is_not_negative);
4303 // Note that this is correct even for kMinInt operands. 4314 // Note that this is correct even for kMinInt operands.
4304 __ Neg(dividend, dividend); 4315 __ Neg(dividend, dividend);
4305 __ And(dividend, dividend, mask); 4316 __ And(dividend, dividend, mask);
4306 __ Negs(dividend, dividend); 4317 __ Negs(dividend, dividend);
4307 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { 4318 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
4308 DeoptimizeIf(eq, instr, "minus zero"); 4319 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
4309 } 4320 }
4310 __ B(&done); 4321 __ B(&done);
4311 } 4322 }
4312 4323
4313 __ bind(&dividend_is_not_negative); 4324 __ bind(&dividend_is_not_negative);
4314 __ And(dividend, dividend, mask); 4325 __ And(dividend, dividend, mask);
4315 __ bind(&done); 4326 __ bind(&done);
4316 } 4327 }
4317 4328
4318 4329
4319 void LCodeGen::DoModByConstI(LModByConstI* instr) { 4330 void LCodeGen::DoModByConstI(LModByConstI* instr) {
4320 Register dividend = ToRegister32(instr->dividend()); 4331 Register dividend = ToRegister32(instr->dividend());
4321 int32_t divisor = instr->divisor(); 4332 int32_t divisor = instr->divisor();
4322 Register result = ToRegister32(instr->result()); 4333 Register result = ToRegister32(instr->result());
4323 Register temp = ToRegister32(instr->temp()); 4334 Register temp = ToRegister32(instr->temp());
4324 DCHECK(!AreAliased(dividend, result, temp)); 4335 DCHECK(!AreAliased(dividend, result, temp));
4325 4336
4326 if (divisor == 0) { 4337 if (divisor == 0) {
4327 Deoptimize(instr, "division by zero"); 4338 Deoptimize(instr, Deoptimizer::kDivisionByZero);
4328 return; 4339 return;
4329 } 4340 }
4330 4341
4331 __ TruncatingDiv(result, dividend, Abs(divisor)); 4342 __ TruncatingDiv(result, dividend, Abs(divisor));
4332 __ Sxtw(dividend.X(), dividend); 4343 __ Sxtw(dividend.X(), dividend);
4333 __ Mov(temp, Abs(divisor)); 4344 __ Mov(temp, Abs(divisor));
4334 __ Smsubl(result.X(), result, temp, dividend.X()); 4345 __ Smsubl(result.X(), result, temp, dividend.X());
4335 4346
4336 // Check for negative zero. 4347 // Check for negative zero.
4337 HMod* hmod = instr->hydrogen(); 4348 HMod* hmod = instr->hydrogen();
4338 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { 4349 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
4339 Label remainder_not_zero; 4350 Label remainder_not_zero;
4340 __ Cbnz(result, &remainder_not_zero); 4351 __ Cbnz(result, &remainder_not_zero);
4341 DeoptimizeIfNegative(dividend, instr, "minus zero"); 4352 DeoptimizeIfNegative(dividend, instr, Deoptimizer::kMinusZero);
4342 __ bind(&remainder_not_zero); 4353 __ bind(&remainder_not_zero);
4343 } 4354 }
4344 } 4355 }
4345 4356
4346 4357
4347 void LCodeGen::DoModI(LModI* instr) { 4358 void LCodeGen::DoModI(LModI* instr) {
4348 Register dividend = ToRegister32(instr->left()); 4359 Register dividend = ToRegister32(instr->left());
4349 Register divisor = ToRegister32(instr->right()); 4360 Register divisor = ToRegister32(instr->right());
4350 Register result = ToRegister32(instr->result()); 4361 Register result = ToRegister32(instr->result());
4351 4362
4352 Label done; 4363 Label done;
4353 // modulo = dividend - quotient * divisor 4364 // modulo = dividend - quotient * divisor
4354 __ Sdiv(result, dividend, divisor); 4365 __ Sdiv(result, dividend, divisor);
4355 if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) { 4366 if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
4356 DeoptimizeIfZero(divisor, instr, "division by zero"); 4367 DeoptimizeIfZero(divisor, instr, Deoptimizer::kDivisionByZero);
4357 } 4368 }
4358 __ Msub(result, result, divisor, dividend); 4369 __ Msub(result, result, divisor, dividend);
4359 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 4370 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
4360 __ Cbnz(result, &done); 4371 __ Cbnz(result, &done);
4361 DeoptimizeIfNegative(dividend, instr, "minus zero"); 4372 DeoptimizeIfNegative(dividend, instr, Deoptimizer::kMinusZero);
4362 } 4373 }
4363 __ Bind(&done); 4374 __ Bind(&done);
4364 } 4375 }
4365 4376
4366 4377
4367 void LCodeGen::DoMulConstIS(LMulConstIS* instr) { 4378 void LCodeGen::DoMulConstIS(LMulConstIS* instr) {
4368 DCHECK(instr->hydrogen()->representation().IsSmiOrInteger32()); 4379 DCHECK(instr->hydrogen()->representation().IsSmiOrInteger32());
4369 bool is_smi = instr->hydrogen()->representation().IsSmi(); 4380 bool is_smi = instr->hydrogen()->representation().IsSmi();
4370 Register result = 4381 Register result =
4371 is_smi ? ToRegister(instr->result()) : ToRegister32(instr->result()); 4382 is_smi ? ToRegister(instr->result()) : ToRegister32(instr->result());
4372 Register left = 4383 Register left =
4373 is_smi ? ToRegister(instr->left()) : ToRegister32(instr->left()) ; 4384 is_smi ? ToRegister(instr->left()) : ToRegister32(instr->left()) ;
4374 int32_t right = ToInteger32(instr->right()); 4385 int32_t right = ToInteger32(instr->right());
4375 DCHECK((right > -kMaxInt) || (right < kMaxInt)); 4386 DCHECK((right > -kMaxInt) || (right < kMaxInt));
4376 4387
4377 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); 4388 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
4378 bool bailout_on_minus_zero = 4389 bool bailout_on_minus_zero =
4379 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero); 4390 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
4380 4391
4381 if (bailout_on_minus_zero) { 4392 if (bailout_on_minus_zero) {
4382 if (right < 0) { 4393 if (right < 0) {
4383 // The result is -0 if right is negative and left is zero. 4394 // The result is -0 if right is negative and left is zero.
4384 DeoptimizeIfZero(left, instr, "minus zero"); 4395 DeoptimizeIfZero(left, instr, Deoptimizer::kMinusZero);
4385 } else if (right == 0) { 4396 } else if (right == 0) {
4386 // The result is -0 if the right is zero and the left is negative. 4397 // The result is -0 if the right is zero and the left is negative.
4387 DeoptimizeIfNegative(left, instr, "minus zero"); 4398 DeoptimizeIfNegative(left, instr, Deoptimizer::kMinusZero);
4388 } 4399 }
4389 } 4400 }
4390 4401
4391 switch (right) { 4402 switch (right) {
4392 // Cases which can detect overflow. 4403 // Cases which can detect overflow.
4393 case -1: 4404 case -1:
4394 if (can_overflow) { 4405 if (can_overflow) {
4395 // Only 0x80000000 can overflow here. 4406 // Only 0x80000000 can overflow here.
4396 __ Negs(result, left); 4407 __ Negs(result, left);
4397 DeoptimizeIf(vs, instr, "overflow"); 4408 DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
4398 } else { 4409 } else {
4399 __ Neg(result, left); 4410 __ Neg(result, left);
4400 } 4411 }
4401 break; 4412 break;
4402 case 0: 4413 case 0:
4403 // This case can never overflow. 4414 // This case can never overflow.
4404 __ Mov(result, 0); 4415 __ Mov(result, 0);
4405 break; 4416 break;
4406 case 1: 4417 case 1:
4407 // This case can never overflow. 4418 // This case can never overflow.
4408 __ Mov(result, left, kDiscardForSameWReg); 4419 __ Mov(result, left, kDiscardForSameWReg);
4409 break; 4420 break;
4410 case 2: 4421 case 2:
4411 if (can_overflow) { 4422 if (can_overflow) {
4412 __ Adds(result, left, left); 4423 __ Adds(result, left, left);
4413 DeoptimizeIf(vs, instr, "overflow"); 4424 DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
4414 } else { 4425 } else {
4415 __ Add(result, left, left); 4426 __ Add(result, left, left);
4416 } 4427 }
4417 break; 4428 break;
4418 4429
4419 default: 4430 default:
4420 // Multiplication by constant powers of two (and some related values) 4431 // Multiplication by constant powers of two (and some related values)
4421 // can be done efficiently with shifted operands. 4432 // can be done efficiently with shifted operands.
4422 int32_t right_abs = Abs(right); 4433 int32_t right_abs = Abs(right);
4423 4434
4424 if (base::bits::IsPowerOfTwo32(right_abs)) { 4435 if (base::bits::IsPowerOfTwo32(right_abs)) {
4425 int right_log2 = WhichPowerOf2(right_abs); 4436 int right_log2 = WhichPowerOf2(right_abs);
4426 4437
4427 if (can_overflow) { 4438 if (can_overflow) {
4428 Register scratch = result; 4439 Register scratch = result;
4429 DCHECK(!AreAliased(scratch, left)); 4440 DCHECK(!AreAliased(scratch, left));
4430 __ Cls(scratch, left); 4441 __ Cls(scratch, left);
4431 __ Cmp(scratch, right_log2); 4442 __ Cmp(scratch, right_log2);
4432 DeoptimizeIf(lt, instr, "overflow"); 4443 DeoptimizeIf(lt, instr, Deoptimizer::kOverflow);
4433 } 4444 }
4434 4445
4435 if (right >= 0) { 4446 if (right >= 0) {
4436 // result = left << log2(right) 4447 // result = left << log2(right)
4437 __ Lsl(result, left, right_log2); 4448 __ Lsl(result, left, right_log2);
4438 } else { 4449 } else {
4439 // result = -left << log2(-right) 4450 // result = -left << log2(-right)
4440 if (can_overflow) { 4451 if (can_overflow) {
4441 __ Negs(result, Operand(left, LSL, right_log2)); 4452 __ Negs(result, Operand(left, LSL, right_log2));
4442 DeoptimizeIf(vs, instr, "overflow"); 4453 DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
4443 } else { 4454 } else {
4444 __ Neg(result, Operand(left, LSL, right_log2)); 4455 __ Neg(result, Operand(left, LSL, right_log2));
4445 } 4456 }
4446 } 4457 }
4447 return; 4458 return;
4448 } 4459 }
4449 4460
4450 4461
4451 // For the following cases, we could perform a conservative overflow check 4462 // For the following cases, we could perform a conservative overflow check
4452 // with CLS as above. However the few cycles saved are likely not worth 4463 // with CLS as above. However the few cycles saved are likely not worth
(...skipping 37 matching lines...) Expand 10 before | Expand all | Expand 10 after
4490 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero); 4501 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
4491 4502
4492 if (bailout_on_minus_zero && !left.Is(right)) { 4503 if (bailout_on_minus_zero && !left.Is(right)) {
4493 // If one operand is zero and the other is negative, the result is -0. 4504 // If one operand is zero and the other is negative, the result is -0.
4494 // - Set Z (eq) if either left or right, or both, are 0. 4505 // - Set Z (eq) if either left or right, or both, are 0.
4495 __ Cmp(left, 0); 4506 __ Cmp(left, 0);
4496 __ Ccmp(right, 0, ZFlag, ne); 4507 __ Ccmp(right, 0, ZFlag, ne);
4497 // - If so (eq), set N (mi) if left + right is negative. 4508 // - If so (eq), set N (mi) if left + right is negative.
4498 // - Otherwise, clear N. 4509 // - Otherwise, clear N.
4499 __ Ccmn(left, right, NoFlag, eq); 4510 __ Ccmn(left, right, NoFlag, eq);
4500 DeoptimizeIf(mi, instr, "minus zero"); 4511 DeoptimizeIf(mi, instr, Deoptimizer::kMinusZero);
4501 } 4512 }
4502 4513
4503 if (can_overflow) { 4514 if (can_overflow) {
4504 __ Smull(result.X(), left, right); 4515 __ Smull(result.X(), left, right);
4505 __ Cmp(result.X(), Operand(result, SXTW)); 4516 __ Cmp(result.X(), Operand(result, SXTW));
4506 DeoptimizeIf(ne, instr, "overflow"); 4517 DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
4507 } else { 4518 } else {
4508 __ Mul(result, left, right); 4519 __ Mul(result, left, right);
4509 } 4520 }
4510 } 4521 }
4511 4522
4512 4523
4513 void LCodeGen::DoMulS(LMulS* instr) { 4524 void LCodeGen::DoMulS(LMulS* instr) {
4514 Register result = ToRegister(instr->result()); 4525 Register result = ToRegister(instr->result());
4515 Register left = ToRegister(instr->left()); 4526 Register left = ToRegister(instr->left());
4516 Register right = ToRegister(instr->right()); 4527 Register right = ToRegister(instr->right());
4517 4528
4518 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); 4529 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
4519 bool bailout_on_minus_zero = 4530 bool bailout_on_minus_zero =
4520 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero); 4531 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
4521 4532
4522 if (bailout_on_minus_zero && !left.Is(right)) { 4533 if (bailout_on_minus_zero && !left.Is(right)) {
4523 // If one operand is zero and the other is negative, the result is -0. 4534 // If one operand is zero and the other is negative, the result is -0.
4524 // - Set Z (eq) if either left or right, or both, are 0. 4535 // - Set Z (eq) if either left or right, or both, are 0.
4525 __ Cmp(left, 0); 4536 __ Cmp(left, 0);
4526 __ Ccmp(right, 0, ZFlag, ne); 4537 __ Ccmp(right, 0, ZFlag, ne);
4527 // - If so (eq), set N (mi) if left + right is negative. 4538 // - If so (eq), set N (mi) if left + right is negative.
4528 // - Otherwise, clear N. 4539 // - Otherwise, clear N.
4529 __ Ccmn(left, right, NoFlag, eq); 4540 __ Ccmn(left, right, NoFlag, eq);
4530 DeoptimizeIf(mi, instr, "minus zero"); 4541 DeoptimizeIf(mi, instr, Deoptimizer::kMinusZero);
4531 } 4542 }
4532 4543
4533 STATIC_ASSERT((kSmiShift == 32) && (kSmiTag == 0)); 4544 STATIC_ASSERT((kSmiShift == 32) && (kSmiTag == 0));
4534 if (can_overflow) { 4545 if (can_overflow) {
4535 __ Smulh(result, left, right); 4546 __ Smulh(result, left, right);
4536 __ Cmp(result, Operand(result.W(), SXTW)); 4547 __ Cmp(result, Operand(result.W(), SXTW));
4537 __ SmiTag(result); 4548 __ SmiTag(result);
4538 DeoptimizeIf(ne, instr, "overflow"); 4549 DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
4539 } else { 4550 } else {
4540 if (AreAliased(result, left, right)) { 4551 if (AreAliased(result, left, right)) {
4541 // All three registers are the same: half untag the input and then 4552 // All three registers are the same: half untag the input and then
4542 // multiply, giving a tagged result. 4553 // multiply, giving a tagged result.
4543 STATIC_ASSERT((kSmiShift % 2) == 0); 4554 STATIC_ASSERT((kSmiShift % 2) == 0);
4544 __ Asr(result, left, kSmiShift / 2); 4555 __ Asr(result, left, kSmiShift / 2);
4545 __ Mul(result, result, result); 4556 __ Mul(result, result, result);
4546 } else if (result.Is(left) && !left.Is(right)) { 4557 } else if (result.Is(left) && !left.Is(right)) {
4547 // Registers result and left alias, right is distinct: untag left into 4558 // Registers result and left alias, right is distinct: untag left into
4548 // result, and then multiply by right, giving a tagged result. 4559 // result, and then multiply by right, giving a tagged result.
(...skipping 155 matching lines...) Expand 10 before | Expand all | Expand 10 after
4704 // Heap number map check. 4715 // Heap number map check.
4705 if (can_convert_undefined_to_nan) { 4716 if (can_convert_undefined_to_nan) {
4706 __ JumpIfNotHeapNumber(input, &convert_undefined); 4717 __ JumpIfNotHeapNumber(input, &convert_undefined);
4707 } else { 4718 } else {
4708 DeoptimizeIfNotHeapNumber(input, instr); 4719 DeoptimizeIfNotHeapNumber(input, instr);
4709 } 4720 }
4710 4721
4711 // Load heap number. 4722 // Load heap number.
4712 __ Ldr(result, FieldMemOperand(input, HeapNumber::kValueOffset)); 4723 __ Ldr(result, FieldMemOperand(input, HeapNumber::kValueOffset));
4713 if (instr->hydrogen()->deoptimize_on_minus_zero()) { 4724 if (instr->hydrogen()->deoptimize_on_minus_zero()) {
4714 DeoptimizeIfMinusZero(result, instr, "minus zero"); 4725 DeoptimizeIfMinusZero(result, instr, Deoptimizer::kMinusZero);
4715 } 4726 }
4716 __ B(&done); 4727 __ B(&done);
4717 4728
4718 if (can_convert_undefined_to_nan) { 4729 if (can_convert_undefined_to_nan) {
4719 __ Bind(&convert_undefined); 4730 __ Bind(&convert_undefined);
4720 DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr, 4731 DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr,
4721 "not a heap number/undefined"); 4732 Deoptimizer::kNotAHeapNumberUndefined);
4722 4733
4723 __ LoadRoot(scratch, Heap::kNanValueRootIndex); 4734 __ LoadRoot(scratch, Heap::kNanValueRootIndex);
4724 __ Ldr(result, FieldMemOperand(scratch, HeapNumber::kValueOffset)); 4735 __ Ldr(result, FieldMemOperand(scratch, HeapNumber::kValueOffset));
4725 __ B(&done); 4736 __ B(&done);
4726 } 4737 }
4727 4738
4728 } else { 4739 } else {
4729 DCHECK(mode == NUMBER_CANDIDATE_IS_SMI); 4740 DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
4730 // Fall through to load_smi. 4741 // Fall through to load_smi.
4731 } 4742 }
(...skipping 173 matching lines...) Expand 10 before | Expand all | Expand 10 after
4905 } 4916 }
4906 } 4917 }
4907 4918
4908 4919
4909 void LCodeGen::DoSmiTag(LSmiTag* instr) { 4920 void LCodeGen::DoSmiTag(LSmiTag* instr) {
4910 HChange* hchange = instr->hydrogen(); 4921 HChange* hchange = instr->hydrogen();
4911 Register input = ToRegister(instr->value()); 4922 Register input = ToRegister(instr->value());
4912 Register output = ToRegister(instr->result()); 4923 Register output = ToRegister(instr->result());
4913 if (hchange->CheckFlag(HValue::kCanOverflow) && 4924 if (hchange->CheckFlag(HValue::kCanOverflow) &&
4914 hchange->value()->CheckFlag(HValue::kUint32)) { 4925 hchange->value()->CheckFlag(HValue::kUint32)) {
4915 DeoptimizeIfNegative(input.W(), instr, "overflow"); 4926 DeoptimizeIfNegative(input.W(), instr, Deoptimizer::kOverflow);
4916 } 4927 }
4917 __ SmiTag(output, input); 4928 __ SmiTag(output, input);
4918 } 4929 }
4919 4930
4920 4931
4921 void LCodeGen::DoSmiUntag(LSmiUntag* instr) { 4932 void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
4922 Register input = ToRegister(instr->value()); 4933 Register input = ToRegister(instr->value());
4923 Register result = ToRegister(instr->result()); 4934 Register result = ToRegister(instr->result());
4924 Label done, untag; 4935 Label done, untag;
4925 4936
4926 if (instr->needs_check()) { 4937 if (instr->needs_check()) {
4927 DeoptimizeIfNotSmi(input, instr, "not a Smi"); 4938 DeoptimizeIfNotSmi(input, instr, Deoptimizer::kNotASmi);
4928 } 4939 }
4929 4940
4930 __ Bind(&untag); 4941 __ Bind(&untag);
4931 __ SmiUntag(result, input); 4942 __ SmiUntag(result, input);
4932 __ Bind(&done); 4943 __ Bind(&done);
4933 } 4944 }
4934 4945
4935 4946
4936 void LCodeGen::DoShiftI(LShiftI* instr) { 4947 void LCodeGen::DoShiftI(LShiftI* instr) {
4937 LOperand* right_op = instr->right(); 4948 LOperand* right_op = instr->right();
4938 Register left = ToRegister32(instr->left()); 4949 Register left = ToRegister32(instr->left());
4939 Register result = ToRegister32(instr->result()); 4950 Register result = ToRegister32(instr->result());
4940 4951
4941 if (right_op->IsRegister()) { 4952 if (right_op->IsRegister()) {
4942 Register right = ToRegister32(instr->right()); 4953 Register right = ToRegister32(instr->right());
4943 switch (instr->op()) { 4954 switch (instr->op()) {
4944 case Token::ROR: __ Ror(result, left, right); break; 4955 case Token::ROR: __ Ror(result, left, right); break;
4945 case Token::SAR: __ Asr(result, left, right); break; 4956 case Token::SAR: __ Asr(result, left, right); break;
4946 case Token::SHL: __ Lsl(result, left, right); break; 4957 case Token::SHL: __ Lsl(result, left, right); break;
4947 case Token::SHR: 4958 case Token::SHR:
4948 __ Lsr(result, left, right); 4959 __ Lsr(result, left, right);
4949 if (instr->can_deopt()) { 4960 if (instr->can_deopt()) {
4950 // If `left >>> right` >= 0x80000000, the result is not representable 4961 // If `left >>> right` >= 0x80000000, the result is not representable
4951 // in a signed 32-bit smi. 4962 // in a signed 32-bit smi.
4952 DeoptimizeIfNegative(result, instr, "negative value"); 4963 DeoptimizeIfNegative(result, instr, Deoptimizer::kNegativeValue);
4953 } 4964 }
4954 break; 4965 break;
4955 default: UNREACHABLE(); 4966 default: UNREACHABLE();
4956 } 4967 }
4957 } else { 4968 } else {
4958 DCHECK(right_op->IsConstantOperand()); 4969 DCHECK(right_op->IsConstantOperand());
4959 int shift_count = JSShiftAmountFromLConstant(right_op); 4970 int shift_count = JSShiftAmountFromLConstant(right_op);
4960 if (shift_count == 0) { 4971 if (shift_count == 0) {
4961 if ((instr->op() == Token::SHR) && instr->can_deopt()) { 4972 if ((instr->op() == Token::SHR) && instr->can_deopt()) {
4962 DeoptimizeIfNegative(left, instr, "negative value"); 4973 DeoptimizeIfNegative(left, instr, Deoptimizer::kNegativeValue);
4963 } 4974 }
4964 __ Mov(result, left, kDiscardForSameWReg); 4975 __ Mov(result, left, kDiscardForSameWReg);
4965 } else { 4976 } else {
4966 switch (instr->op()) { 4977 switch (instr->op()) {
4967 case Token::ROR: __ Ror(result, left, shift_count); break; 4978 case Token::ROR: __ Ror(result, left, shift_count); break;
4968 case Token::SAR: __ Asr(result, left, shift_count); break; 4979 case Token::SAR: __ Asr(result, left, shift_count); break;
4969 case Token::SHL: __ Lsl(result, left, shift_count); break; 4980 case Token::SHL: __ Lsl(result, left, shift_count); break;
4970 case Token::SHR: __ Lsr(result, left, shift_count); break; 4981 case Token::SHR: __ Lsr(result, left, shift_count); break;
4971 default: UNREACHABLE(); 4982 default: UNREACHABLE();
4972 } 4983 }
(...skipping 32 matching lines...) Expand 10 before | Expand all | Expand 10 after
5005 break; 5016 break;
5006 case Token::SHL: 5017 case Token::SHL:
5007 __ Lsl(result, left, result); 5018 __ Lsl(result, left, result);
5008 break; 5019 break;
5009 case Token::SHR: 5020 case Token::SHR:
5010 __ Lsr(result, left, result); 5021 __ Lsr(result, left, result);
5011 __ Bic(result, result, kSmiShiftMask); 5022 __ Bic(result, result, kSmiShiftMask);
5012 if (instr->can_deopt()) { 5023 if (instr->can_deopt()) {
5013 // If `left >>> right` >= 0x80000000, the result is not representable 5024 // If `left >>> right` >= 0x80000000, the result is not representable
5014 // in a signed 32-bit smi. 5025 // in a signed 32-bit smi.
5015 DeoptimizeIfNegative(result, instr, "negative value"); 5026 DeoptimizeIfNegative(result, instr, Deoptimizer::kNegativeValue);
5016 } 5027 }
5017 break; 5028 break;
5018 default: UNREACHABLE(); 5029 default: UNREACHABLE();
5019 } 5030 }
5020 } else { 5031 } else {
5021 DCHECK(right_op->IsConstantOperand()); 5032 DCHECK(right_op->IsConstantOperand());
5022 int shift_count = JSShiftAmountFromLConstant(right_op); 5033 int shift_count = JSShiftAmountFromLConstant(right_op);
5023 if (shift_count == 0) { 5034 if (shift_count == 0) {
5024 if ((instr->op() == Token::SHR) && instr->can_deopt()) { 5035 if ((instr->op() == Token::SHR) && instr->can_deopt()) {
5025 DeoptimizeIfNegative(left, instr, "negative value"); 5036 DeoptimizeIfNegative(left, instr, Deoptimizer::kNegativeValue);
5026 } 5037 }
5027 __ Mov(result, left); 5038 __ Mov(result, left);
5028 } else { 5039 } else {
5029 switch (instr->op()) { 5040 switch (instr->op()) {
5030 case Token::ROR: 5041 case Token::ROR:
5031 __ SmiUntag(result, left); 5042 __ SmiUntag(result, left);
5032 __ Ror(result.W(), result.W(), shift_count); 5043 __ Ror(result.W(), result.W(), shift_count);
5033 __ SmiTag(result); 5044 __ SmiTag(result);
5034 break; 5045 break;
5035 case Token::SAR: 5046 case Token::SAR:
(...skipping 107 matching lines...) Expand 10 before | Expand all | Expand 10 after
5143 Register context = ToRegister(instr->context()); 5154 Register context = ToRegister(instr->context());
5144 Register value = ToRegister(instr->value()); 5155 Register value = ToRegister(instr->value());
5145 Register scratch = ToRegister(instr->temp()); 5156 Register scratch = ToRegister(instr->temp());
5146 MemOperand target = ContextMemOperand(context, instr->slot_index()); 5157 MemOperand target = ContextMemOperand(context, instr->slot_index());
5147 5158
5148 Label skip_assignment; 5159 Label skip_assignment;
5149 5160
5150 if (instr->hydrogen()->RequiresHoleCheck()) { 5161 if (instr->hydrogen()->RequiresHoleCheck()) {
5151 __ Ldr(scratch, target); 5162 __ Ldr(scratch, target);
5152 if (instr->hydrogen()->DeoptimizesOnHole()) { 5163 if (instr->hydrogen()->DeoptimizesOnHole()) {
5153 DeoptimizeIfRoot(scratch, Heap::kTheHoleValueRootIndex, instr, "hole"); 5164 DeoptimizeIfRoot(scratch, Heap::kTheHoleValueRootIndex, instr,
5165 Deoptimizer::kHole);
5154 } else { 5166 } else {
5155 __ JumpIfNotRoot(scratch, Heap::kTheHoleValueRootIndex, &skip_assignment); 5167 __ JumpIfNotRoot(scratch, Heap::kTheHoleValueRootIndex, &skip_assignment);
5156 } 5168 }
5157 } 5169 }
5158 5170
5159 __ Str(value, target); 5171 __ Str(value, target);
5160 if (instr->hydrogen()->NeedsWriteBarrier()) { 5172 if (instr->hydrogen()->NeedsWriteBarrier()) {
5161 SmiCheck check_needed = 5173 SmiCheck check_needed =
5162 instr->hydrogen()->value()->type().IsHeapObject() 5174 instr->hydrogen()->value()->type().IsHeapObject()
5163 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; 5175 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
(...skipping 17 matching lines...) Expand all
5181 // Load the cell. 5193 // Load the cell.
5182 __ Mov(cell, Operand(instr->hydrogen()->cell().handle())); 5194 __ Mov(cell, Operand(instr->hydrogen()->cell().handle()));
5183 5195
5184 // If the cell we are storing to contains the hole it could have 5196 // If the cell we are storing to contains the hole it could have
5185 // been deleted from the property dictionary. In that case, we need 5197 // been deleted from the property dictionary. In that case, we need
5186 // to update the property details in the property dictionary to mark 5198 // to update the property details in the property dictionary to mark
5187 // it as no longer deleted. We deoptimize in that case. 5199 // it as no longer deleted. We deoptimize in that case.
5188 if (instr->hydrogen()->RequiresHoleCheck()) { 5200 if (instr->hydrogen()->RequiresHoleCheck()) {
5189 Register payload = ToRegister(instr->temp2()); 5201 Register payload = ToRegister(instr->temp2());
5190 __ Ldr(payload, FieldMemOperand(cell, Cell::kValueOffset)); 5202 __ Ldr(payload, FieldMemOperand(cell, Cell::kValueOffset));
5191 DeoptimizeIfRoot(payload, Heap::kTheHoleValueRootIndex, instr, "hole"); 5203 DeoptimizeIfRoot(payload, Heap::kTheHoleValueRootIndex, instr,
5204 Deoptimizer::kHole);
5192 } 5205 }
5193 5206
5194 // Store the value. 5207 // Store the value.
5195 __ Str(value, FieldMemOperand(cell, Cell::kValueOffset)); 5208 __ Str(value, FieldMemOperand(cell, Cell::kValueOffset));
5196 // Cells are always rescanned, so no write barrier here. 5209 // Cells are always rescanned, so no write barrier here.
5197 } 5210 }
5198 5211
5199 5212
5200 void LCodeGen::DoStoreKeyedExternal(LStoreKeyedExternal* instr) { 5213 void LCodeGen::DoStoreKeyedExternal(LStoreKeyedExternal* instr) {
5201 Register ext_ptr = ToRegister(instr->elements()); 5214 Register ext_ptr = ToRegister(instr->elements());
(...skipping 401 matching lines...) Expand 10 before | Expand all | Expand 10 after
5603 5616
5604 5617
5605 void LCodeGen::DoSubI(LSubI* instr) { 5618 void LCodeGen::DoSubI(LSubI* instr) {
5606 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); 5619 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
5607 Register result = ToRegister32(instr->result()); 5620 Register result = ToRegister32(instr->result());
5608 Register left = ToRegister32(instr->left()); 5621 Register left = ToRegister32(instr->left());
5609 Operand right = ToShiftedRightOperand32(instr->right(), instr); 5622 Operand right = ToShiftedRightOperand32(instr->right(), instr);
5610 5623
5611 if (can_overflow) { 5624 if (can_overflow) {
5612 __ Subs(result, left, right); 5625 __ Subs(result, left, right);
5613 DeoptimizeIf(vs, instr, "overflow"); 5626 DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
5614 } else { 5627 } else {
5615 __ Sub(result, left, right); 5628 __ Sub(result, left, right);
5616 } 5629 }
5617 } 5630 }
5618 5631
5619 5632
5620 void LCodeGen::DoSubS(LSubS* instr) { 5633 void LCodeGen::DoSubS(LSubS* instr) {
5621 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); 5634 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
5622 Register result = ToRegister(instr->result()); 5635 Register result = ToRegister(instr->result());
5623 Register left = ToRegister(instr->left()); 5636 Register left = ToRegister(instr->left());
5624 Operand right = ToOperand(instr->right()); 5637 Operand right = ToOperand(instr->right());
5625 if (can_overflow) { 5638 if (can_overflow) {
5626 __ Subs(result, left, right); 5639 __ Subs(result, left, right);
5627 DeoptimizeIf(vs, instr, "overflow"); 5640 DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
5628 } else { 5641 } else {
5629 __ Sub(result, left, right); 5642 __ Sub(result, left, right);
5630 } 5643 }
5631 } 5644 }
5632 5645
5633 5646
5634 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, 5647 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr,
5635 LOperand* value, 5648 LOperand* value,
5636 LOperand* temp1, 5649 LOperand* temp1,
5637 LOperand* temp2) { 5650 LOperand* temp2) {
(...skipping 20 matching lines...) Expand all
5658 Register false_root = scratch1; 5671 Register false_root = scratch1;
5659 __ LoadTrueFalseRoots(true_root, false_root); 5672 __ LoadTrueFalseRoots(true_root, false_root);
5660 __ Cmp(input, true_root); 5673 __ Cmp(input, true_root);
5661 __ Cset(output, eq); 5674 __ Cset(output, eq);
5662 __ Ccmp(input, false_root, ZFlag, ne); 5675 __ Ccmp(input, false_root, ZFlag, ne);
5663 __ B(eq, &done); 5676 __ B(eq, &done);
5664 5677
5665 // Output contains zero, undefined is converted to zero for truncating 5678 // Output contains zero, undefined is converted to zero for truncating
5666 // conversions. 5679 // conversions.
5667 DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr, 5680 DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr,
5668 "not a heap number/undefined/true/false"); 5681 Deoptimizer::kNotAHeapNumberUndefinedBoolean);
5669 } else { 5682 } else {
5670 Register output = ToRegister32(instr->result()); 5683 Register output = ToRegister32(instr->result());
5671 DoubleRegister dbl_scratch2 = ToDoubleRegister(temp2); 5684 DoubleRegister dbl_scratch2 = ToDoubleRegister(temp2);
5672 5685
5673 DeoptimizeIfNotHeapNumber(input, instr); 5686 DeoptimizeIfNotHeapNumber(input, instr);
5674 5687
5675 // A heap number: load value and convert to int32 using non-truncating 5688 // A heap number: load value and convert to int32 using non-truncating
5676 // function. If the result is out of range, branch to deoptimize. 5689 // function. If the result is out of range, branch to deoptimize.
5677 __ Ldr(dbl_scratch1, FieldMemOperand(input, HeapNumber::kValueOffset)); 5690 __ Ldr(dbl_scratch1, FieldMemOperand(input, HeapNumber::kValueOffset));
5678 __ TryRepresentDoubleAsInt32(output, dbl_scratch1, dbl_scratch2); 5691 __ TryRepresentDoubleAsInt32(output, dbl_scratch1, dbl_scratch2);
5679 DeoptimizeIf(ne, instr, "lost precision or NaN"); 5692 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
5680 5693
5681 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 5694 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
5682 __ Cmp(output, 0); 5695 __ Cmp(output, 0);
5683 __ B(ne, &done); 5696 __ B(ne, &done);
5684 __ Fmov(scratch1, dbl_scratch1); 5697 __ Fmov(scratch1, dbl_scratch1);
5685 DeoptimizeIfNegative(scratch1, instr, "minus zero"); 5698 DeoptimizeIfNegative(scratch1, instr, Deoptimizer::kMinusZero);
5686 } 5699 }
5687 } 5700 }
5688 __ Bind(&done); 5701 __ Bind(&done);
5689 } 5702 }
5690 5703
5691 5704
5692 void LCodeGen::DoTaggedToI(LTaggedToI* instr) { 5705 void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
5693 class DeferredTaggedToI: public LDeferredCode { 5706 class DeferredTaggedToI: public LDeferredCode {
5694 public: 5707 public:
5695 DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr) 5708 DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
(...skipping 120 matching lines...) Expand 10 before | Expand all | Expand 10 after
5816 } 5829 }
5817 5830
5818 5831
5819 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) { 5832 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
5820 Register object = ToRegister(instr->object()); 5833 Register object = ToRegister(instr->object());
5821 Register temp1 = ToRegister(instr->temp1()); 5834 Register temp1 = ToRegister(instr->temp1());
5822 Register temp2 = ToRegister(instr->temp2()); 5835 Register temp2 = ToRegister(instr->temp2());
5823 5836
5824 Label no_memento_found; 5837 Label no_memento_found;
5825 __ TestJSArrayForAllocationMemento(object, temp1, temp2, &no_memento_found); 5838 __ TestJSArrayForAllocationMemento(object, temp1, temp2, &no_memento_found);
5826 DeoptimizeIf(eq, instr, "memento found"); 5839 DeoptimizeIf(eq, instr, Deoptimizer::kMementoFound);
5827 __ Bind(&no_memento_found); 5840 __ Bind(&no_memento_found);
5828 } 5841 }
5829 5842
5830 5843
5831 void LCodeGen::DoTruncateDoubleToIntOrSmi(LTruncateDoubleToIntOrSmi* instr) { 5844 void LCodeGen::DoTruncateDoubleToIntOrSmi(LTruncateDoubleToIntOrSmi* instr) {
5832 DoubleRegister input = ToDoubleRegister(instr->value()); 5845 DoubleRegister input = ToDoubleRegister(instr->value());
5833 Register result = ToRegister(instr->result()); 5846 Register result = ToRegister(instr->result());
5834 __ TruncateDoubleToI(result, input); 5847 __ TruncateDoubleToI(result, input);
5835 if (instr->tag_result()) { 5848 if (instr->tag_result()) {
5836 __ SmiTag(result, result); 5849 __ SmiTag(result, result);
(...skipping 104 matching lines...) Expand 10 before | Expand all | Expand 10 after
5941 __ Ucvtf(ToDoubleRegister(instr->result()), ToRegister32(instr->value())); 5954 __ Ucvtf(ToDoubleRegister(instr->result()), ToRegister32(instr->value()));
5942 } 5955 }
5943 5956
5944 5957
5945 void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) { 5958 void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
5946 Register object = ToRegister(instr->value()); 5959 Register object = ToRegister(instr->value());
5947 Register map = ToRegister(instr->map()); 5960 Register map = ToRegister(instr->map());
5948 Register temp = ToRegister(instr->temp()); 5961 Register temp = ToRegister(instr->temp());
5949 __ Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset)); 5962 __ Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
5950 __ Cmp(map, temp); 5963 __ Cmp(map, temp);
5951 DeoptimizeIf(ne, instr, "wrong map"); 5964 DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
5952 } 5965 }
5953 5966
5954 5967
5955 void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) { 5968 void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
5956 Register receiver = ToRegister(instr->receiver()); 5969 Register receiver = ToRegister(instr->receiver());
5957 Register function = ToRegister(instr->function()); 5970 Register function = ToRegister(instr->function());
5958 Register result = ToRegister(instr->result()); 5971 Register result = ToRegister(instr->result());
5959 5972
5960 // If the receiver is null or undefined, we have to pass the global object as 5973 // If the receiver is null or undefined, we have to pass the global object as
5961 // a receiver to normal functions. Values have to be passed unchanged to 5974 // a receiver to normal functions. Values have to be passed unchanged to
(...skipping 13 matching lines...) Expand all
5975 5988
5976 // Do not transform the receiver to object for builtins. 5989 // Do not transform the receiver to object for builtins.
5977 __ Tbnz(result, SharedFunctionInfo::kNative, &copy_receiver); 5990 __ Tbnz(result, SharedFunctionInfo::kNative, &copy_receiver);
5978 } 5991 }
5979 5992
5980 // Normal function. Replace undefined or null with global receiver. 5993 // Normal function. Replace undefined or null with global receiver.
5981 __ JumpIfRoot(receiver, Heap::kNullValueRootIndex, &global_object); 5994 __ JumpIfRoot(receiver, Heap::kNullValueRootIndex, &global_object);
5982 __ JumpIfRoot(receiver, Heap::kUndefinedValueRootIndex, &global_object); 5995 __ JumpIfRoot(receiver, Heap::kUndefinedValueRootIndex, &global_object);
5983 5996
5984 // Deoptimize if the receiver is not a JS object. 5997 // Deoptimize if the receiver is not a JS object.
5985 DeoptimizeIfSmi(receiver, instr, "Smi"); 5998 DeoptimizeIfSmi(receiver, instr, Deoptimizer::kSmi);
5986 __ CompareObjectType(receiver, result, result, FIRST_SPEC_OBJECT_TYPE); 5999 __ CompareObjectType(receiver, result, result, FIRST_SPEC_OBJECT_TYPE);
5987 __ B(ge, &copy_receiver); 6000 __ B(ge, &copy_receiver);
5988 Deoptimize(instr, "not a JavaScript object"); 6001 Deoptimize(instr, Deoptimizer::kNotAJavaScriptObject);
5989 6002
5990 __ Bind(&global_object); 6003 __ Bind(&global_object);
5991 __ Ldr(result, FieldMemOperand(function, JSFunction::kContextOffset)); 6004 __ Ldr(result, FieldMemOperand(function, JSFunction::kContextOffset));
5992 __ Ldr(result, ContextMemOperand(result, Context::GLOBAL_OBJECT_INDEX)); 6005 __ Ldr(result, ContextMemOperand(result, Context::GLOBAL_OBJECT_INDEX));
5993 __ Ldr(result, FieldMemOperand(result, GlobalObject::kGlobalProxyOffset)); 6006 __ Ldr(result, FieldMemOperand(result, GlobalObject::kGlobalProxyOffset));
5994 __ B(&done); 6007 __ B(&done);
5995 6008
5996 __ Bind(&copy_receiver); 6009 __ Bind(&copy_receiver);
5997 __ Mov(result, receiver); 6010 __ Mov(result, receiver);
5998 __ Bind(&done); 6011 __ Bind(&done);
(...skipping 86 matching lines...) Expand 10 before | Expand all | Expand 10 after
6085 Handle<ScopeInfo> scope_info = instr->scope_info(); 6098 Handle<ScopeInfo> scope_info = instr->scope_info();
6086 __ Push(scope_info); 6099 __ Push(scope_info);
6087 __ Push(ToRegister(instr->function())); 6100 __ Push(ToRegister(instr->function()));
6088 CallRuntime(Runtime::kPushBlockContext, 2, instr); 6101 CallRuntime(Runtime::kPushBlockContext, 2, instr);
6089 RecordSafepoint(Safepoint::kNoLazyDeopt); 6102 RecordSafepoint(Safepoint::kNoLazyDeopt);
6090 } 6103 }
6091 6104
6092 6105
6093 6106
6094 } } // namespace v8::internal 6107 } } // namespace v8::internal
OLDNEW
« no previous file with comments | « src/arm64/lithium-codegen-arm64.h ('k') | src/assembler.h » ('j') | src/assembler.cc » ('J')

Powered by Google App Engine
This is Rietveld 408576698