OLD | NEW |
1 // Copyright 2013 the V8 project authors. All rights reserved. | 1 // Copyright 2013 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/v8.h" | 5 #include "src/v8.h" |
6 | 6 |
7 #include "src/arm64/lithium-codegen-arm64.h" | 7 #include "src/arm64/lithium-codegen-arm64.h" |
8 #include "src/arm64/lithium-gap-resolver-arm64.h" | 8 #include "src/arm64/lithium-gap-resolver-arm64.h" |
9 #include "src/base/bits.h" | 9 #include "src/base/bits.h" |
10 #include "src/code-factory.h" | 10 #include "src/code-factory.h" |
(...skipping 1045 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1056 // jump entry if this is the case. | 1056 // jump entry if this is the case. |
1057 if (jump_table_.is_empty() || | 1057 if (jump_table_.is_empty() || |
1058 !table_entry->IsEquivalentTo(*jump_table_.last())) { | 1058 !table_entry->IsEquivalentTo(*jump_table_.last())) { |
1059 jump_table_.Add(table_entry, zone()); | 1059 jump_table_.Add(table_entry, zone()); |
1060 } | 1060 } |
1061 __ B(&jump_table_.last()->label, branch_type, reg, bit); | 1061 __ B(&jump_table_.last()->label, branch_type, reg, bit); |
1062 } | 1062 } |
1063 } | 1063 } |
1064 | 1064 |
1065 | 1065 |
1066 void LCodeGen::Deoptimize(LInstruction* instr, | 1066 void LCodeGen::Deoptimize(LInstruction* instr, const char* detail, |
1067 Deoptimizer::BailoutType* override_bailout_type, | 1067 Deoptimizer::BailoutType* override_bailout_type) { |
1068 const char* detail) { | |
1069 DeoptimizeBranch(instr, detail, always, NoReg, -1, override_bailout_type); | 1068 DeoptimizeBranch(instr, detail, always, NoReg, -1, override_bailout_type); |
1070 } | 1069 } |
1071 | 1070 |
1072 | 1071 |
1073 void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr, | 1072 void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr, |
1074 const char* detail) { | 1073 const char* detail) { |
1075 DeoptimizeBranch(instr, detail, static_cast<BranchType>(cond)); | 1074 DeoptimizeBranch(instr, detail, static_cast<BranchType>(cond)); |
1076 } | 1075 } |
1077 | 1076 |
1078 | 1077 |
(...skipping 430 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1509 | 1508 |
1510 | 1509 |
1511 void LCodeGen::DoAddI(LAddI* instr) { | 1510 void LCodeGen::DoAddI(LAddI* instr) { |
1512 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); | 1511 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); |
1513 Register result = ToRegister32(instr->result()); | 1512 Register result = ToRegister32(instr->result()); |
1514 Register left = ToRegister32(instr->left()); | 1513 Register left = ToRegister32(instr->left()); |
1515 Operand right = ToShiftedRightOperand32(instr->right(), instr); | 1514 Operand right = ToShiftedRightOperand32(instr->right(), instr); |
1516 | 1515 |
1517 if (can_overflow) { | 1516 if (can_overflow) { |
1518 __ Adds(result, left, right); | 1517 __ Adds(result, left, right); |
1519 DeoptimizeIf(vs, instr); | 1518 DeoptimizeIf(vs, instr, "overflow"); |
1520 } else { | 1519 } else { |
1521 __ Add(result, left, right); | 1520 __ Add(result, left, right); |
1522 } | 1521 } |
1523 } | 1522 } |
1524 | 1523 |
1525 | 1524 |
1526 void LCodeGen::DoAddS(LAddS* instr) { | 1525 void LCodeGen::DoAddS(LAddS* instr) { |
1527 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); | 1526 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); |
1528 Register result = ToRegister(instr->result()); | 1527 Register result = ToRegister(instr->result()); |
1529 Register left = ToRegister(instr->left()); | 1528 Register left = ToRegister(instr->left()); |
1530 Operand right = ToOperand(instr->right()); | 1529 Operand right = ToOperand(instr->right()); |
1531 if (can_overflow) { | 1530 if (can_overflow) { |
1532 __ Adds(result, left, right); | 1531 __ Adds(result, left, right); |
1533 DeoptimizeIf(vs, instr); | 1532 DeoptimizeIf(vs, instr, "overflow"); |
1534 } else { | 1533 } else { |
1535 __ Add(result, left, right); | 1534 __ Add(result, left, right); |
1536 } | 1535 } |
1537 } | 1536 } |
1538 | 1537 |
1539 | 1538 |
1540 void LCodeGen::DoAllocate(LAllocate* instr) { | 1539 void LCodeGen::DoAllocate(LAllocate* instr) { |
1541 class DeferredAllocate: public LDeferredCode { | 1540 class DeferredAllocate: public LDeferredCode { |
1542 public: | 1541 public: |
1543 DeferredAllocate(LCodeGen* codegen, LAllocate* instr) | 1542 DeferredAllocate(LCodeGen* codegen, LAllocate* instr) |
(...skipping 105 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1649 Register scratch = x5; | 1648 Register scratch = x5; |
1650 DCHECK(receiver.Is(x0)); // Used for parameter count. | 1649 DCHECK(receiver.Is(x0)); // Used for parameter count. |
1651 DCHECK(function.Is(x1)); // Required by InvokeFunction. | 1650 DCHECK(function.Is(x1)); // Required by InvokeFunction. |
1652 DCHECK(ToRegister(instr->result()).Is(x0)); | 1651 DCHECK(ToRegister(instr->result()).Is(x0)); |
1653 DCHECK(instr->IsMarkedAsCall()); | 1652 DCHECK(instr->IsMarkedAsCall()); |
1654 | 1653 |
1655 // Copy the arguments to this function possibly from the | 1654 // Copy the arguments to this function possibly from the |
1656 // adaptor frame below it. | 1655 // adaptor frame below it. |
1657 const uint32_t kArgumentsLimit = 1 * KB; | 1656 const uint32_t kArgumentsLimit = 1 * KB; |
1658 __ Cmp(length, kArgumentsLimit); | 1657 __ Cmp(length, kArgumentsLimit); |
1659 DeoptimizeIf(hi, instr); | 1658 DeoptimizeIf(hi, instr, "too many arguments"); |
1660 | 1659 |
1661 // Push the receiver and use the register to keep the original | 1660 // Push the receiver and use the register to keep the original |
1662 // number of arguments. | 1661 // number of arguments. |
1663 __ Push(receiver); | 1662 __ Push(receiver); |
1664 Register argc = receiver; | 1663 Register argc = receiver; |
1665 receiver = NoReg; | 1664 receiver = NoReg; |
1666 __ Sxtw(argc, length); | 1665 __ Sxtw(argc, length); |
1667 // The arguments are at a one pointer size offset from elements. | 1666 // The arguments are at a one pointer size offset from elements. |
1668 __ Add(elements, elements, 1 * kPointerSize); | 1667 __ Add(elements, elements, 1 * kPointerSize); |
1669 | 1668 |
(...skipping 161 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1831 __ Cmp(length, index); | 1830 __ Cmp(length, index); |
1832 cond = CommuteCondition(cond); | 1831 cond = CommuteCondition(cond); |
1833 } else { | 1832 } else { |
1834 Register index = ToRegister32(instr->index()); | 1833 Register index = ToRegister32(instr->index()); |
1835 Operand length = ToOperand32(instr->length()); | 1834 Operand length = ToOperand32(instr->length()); |
1836 __ Cmp(index, length); | 1835 __ Cmp(index, length); |
1837 } | 1836 } |
1838 if (FLAG_debug_code && instr->hydrogen()->skip_check()) { | 1837 if (FLAG_debug_code && instr->hydrogen()->skip_check()) { |
1839 __ Assert(NegateCondition(cond), kEliminatedBoundsCheckFailed); | 1838 __ Assert(NegateCondition(cond), kEliminatedBoundsCheckFailed); |
1840 } else { | 1839 } else { |
1841 DeoptimizeIf(cond, instr); | 1840 DeoptimizeIf(cond, instr, "out of bounds"); |
1842 } | 1841 } |
1843 } | 1842 } |
1844 | 1843 |
1845 | 1844 |
1846 void LCodeGen::DoBranch(LBranch* instr) { | 1845 void LCodeGen::DoBranch(LBranch* instr) { |
1847 Representation r = instr->hydrogen()->value()->representation(); | 1846 Representation r = instr->hydrogen()->value()->representation(); |
1848 Label* true_label = instr->TrueLabel(chunk_); | 1847 Label* true_label = instr->TrueLabel(chunk_); |
1849 Label* false_label = instr->FalseLabel(chunk_); | 1848 Label* false_label = instr->FalseLabel(chunk_); |
1850 | 1849 |
1851 if (r.IsInteger32()) { | 1850 if (r.IsInteger32()) { |
(...skipping 58 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1910 value, Heap::kNullValueRootIndex, false_label); | 1909 value, Heap::kNullValueRootIndex, false_label); |
1911 } | 1910 } |
1912 | 1911 |
1913 if (expected.Contains(ToBooleanStub::SMI)) { | 1912 if (expected.Contains(ToBooleanStub::SMI)) { |
1914 // Smis: 0 -> false, all other -> true. | 1913 // Smis: 0 -> false, all other -> true. |
1915 DCHECK(Smi::FromInt(0) == 0); | 1914 DCHECK(Smi::FromInt(0) == 0); |
1916 __ Cbz(value, false_label); | 1915 __ Cbz(value, false_label); |
1917 __ JumpIfSmi(value, true_label); | 1916 __ JumpIfSmi(value, true_label); |
1918 } else if (expected.NeedsMap()) { | 1917 } else if (expected.NeedsMap()) { |
1919 // If we need a map later and have a smi, deopt. | 1918 // If we need a map later and have a smi, deopt. |
1920 DeoptimizeIfSmi(value, instr); | 1919 DeoptimizeIfSmi(value, instr, "Smi"); |
1921 } | 1920 } |
1922 | 1921 |
1923 Register map = NoReg; | 1922 Register map = NoReg; |
1924 Register scratch = NoReg; | 1923 Register scratch = NoReg; |
1925 | 1924 |
1926 if (expected.NeedsMap()) { | 1925 if (expected.NeedsMap()) { |
1927 DCHECK((instr->temp1() != NULL) && (instr->temp2() != NULL)); | 1926 DCHECK((instr->temp1() != NULL) && (instr->temp2() != NULL)); |
1928 map = ToRegister(instr->temp1()); | 1927 map = ToRegister(instr->temp1()); |
1929 scratch = ToRegister(instr->temp2()); | 1928 scratch = ToRegister(instr->temp2()); |
1930 | 1929 |
(...skipping 40 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1971 // If we got a NaN (overflow bit is set), jump to the false branch. | 1970 // If we got a NaN (overflow bit is set), jump to the false branch. |
1972 __ B(vs, false_label); | 1971 __ B(vs, false_label); |
1973 __ B(eq, false_label); | 1972 __ B(eq, false_label); |
1974 __ B(true_label); | 1973 __ B(true_label); |
1975 __ Bind(&not_heap_number); | 1974 __ Bind(&not_heap_number); |
1976 } | 1975 } |
1977 | 1976 |
1978 if (!expected.IsGeneric()) { | 1977 if (!expected.IsGeneric()) { |
1979 // We've seen something for the first time -> deopt. | 1978 // We've seen something for the first time -> deopt. |
1980 // This can only happen if we are not generic already. | 1979 // This can only happen if we are not generic already. |
1981 Deoptimize(instr); | 1980 Deoptimize(instr, "unexpected object"); |
1982 } | 1981 } |
1983 } | 1982 } |
1984 } | 1983 } |
1985 } | 1984 } |
1986 | 1985 |
1987 | 1986 |
1988 void LCodeGen::CallKnownFunction(Handle<JSFunction> function, | 1987 void LCodeGen::CallKnownFunction(Handle<JSFunction> function, |
1989 int formal_parameter_count, | 1988 int formal_parameter_count, |
1990 int arity, | 1989 int arity, |
1991 LInstruction* instr, | 1990 LInstruction* instr, |
(...skipping 164 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2156 Register temp = ToRegister(instr->temp()); | 2155 Register temp = ToRegister(instr->temp()); |
2157 { | 2156 { |
2158 PushSafepointRegistersScope scope(this); | 2157 PushSafepointRegistersScope scope(this); |
2159 __ Push(object); | 2158 __ Push(object); |
2160 __ Mov(cp, 0); | 2159 __ Mov(cp, 0); |
2161 __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance); | 2160 __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance); |
2162 RecordSafepointWithRegisters( | 2161 RecordSafepointWithRegisters( |
2163 instr->pointer_map(), 1, Safepoint::kNoLazyDeopt); | 2162 instr->pointer_map(), 1, Safepoint::kNoLazyDeopt); |
2164 __ StoreToSafepointRegisterSlot(x0, temp); | 2163 __ StoreToSafepointRegisterSlot(x0, temp); |
2165 } | 2164 } |
2166 DeoptimizeIfSmi(temp, instr); | 2165 DeoptimizeIfSmi(temp, instr, "instance migration failed"); |
2167 } | 2166 } |
2168 | 2167 |
2169 | 2168 |
2170 void LCodeGen::DoCheckMaps(LCheckMaps* instr) { | 2169 void LCodeGen::DoCheckMaps(LCheckMaps* instr) { |
2171 class DeferredCheckMaps: public LDeferredCode { | 2170 class DeferredCheckMaps: public LDeferredCode { |
2172 public: | 2171 public: |
2173 DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object) | 2172 DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object) |
2174 : LDeferredCode(codegen), instr_(instr), object_(object) { | 2173 : LDeferredCode(codegen), instr_(instr), object_(object) { |
2175 SetExit(check_maps()); | 2174 SetExit(check_maps()); |
2176 } | 2175 } |
(...skipping 34 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2211 __ CompareMap(map_reg, map); | 2210 __ CompareMap(map_reg, map); |
2212 __ B(eq, &success); | 2211 __ B(eq, &success); |
2213 } | 2212 } |
2214 Handle<Map> map = maps->at(maps->size() - 1).handle(); | 2213 Handle<Map> map = maps->at(maps->size() - 1).handle(); |
2215 __ CompareMap(map_reg, map); | 2214 __ CompareMap(map_reg, map); |
2216 | 2215 |
2217 // We didn't match a map. | 2216 // We didn't match a map. |
2218 if (instr->hydrogen()->HasMigrationTarget()) { | 2217 if (instr->hydrogen()->HasMigrationTarget()) { |
2219 __ B(ne, deferred->entry()); | 2218 __ B(ne, deferred->entry()); |
2220 } else { | 2219 } else { |
2221 DeoptimizeIf(ne, instr); | 2220 DeoptimizeIf(ne, instr, "wrong map"); |
2222 } | 2221 } |
2223 | 2222 |
2224 __ Bind(&success); | 2223 __ Bind(&success); |
2225 } | 2224 } |
2226 | 2225 |
2227 | 2226 |
2228 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) { | 2227 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) { |
2229 if (!instr->hydrogen()->value()->type().IsHeapObject()) { | 2228 if (!instr->hydrogen()->value()->type().IsHeapObject()) { |
2230 DeoptimizeIfSmi(ToRegister(instr->value()), instr); | 2229 DeoptimizeIfSmi(ToRegister(instr->value()), instr, "Smi"); |
2231 } | 2230 } |
2232 } | 2231 } |
2233 | 2232 |
2234 | 2233 |
2235 void LCodeGen::DoCheckSmi(LCheckSmi* instr) { | 2234 void LCodeGen::DoCheckSmi(LCheckSmi* instr) { |
2236 Register value = ToRegister(instr->value()); | 2235 Register value = ToRegister(instr->value()); |
2237 DCHECK(!instr->result() || ToRegister(instr->result()).Is(value)); | 2236 DCHECK(!instr->result() || ToRegister(instr->result()).Is(value)); |
2238 DeoptimizeIfNotSmi(value, instr); | 2237 DeoptimizeIfNotSmi(value, instr, "not a Smi"); |
2239 } | 2238 } |
2240 | 2239 |
2241 | 2240 |
2242 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) { | 2241 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) { |
2243 Register input = ToRegister(instr->value()); | 2242 Register input = ToRegister(instr->value()); |
2244 Register scratch = ToRegister(instr->temp()); | 2243 Register scratch = ToRegister(instr->temp()); |
2245 | 2244 |
2246 __ Ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); | 2245 __ Ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); |
2247 __ Ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); | 2246 __ Ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); |
2248 | 2247 |
2249 if (instr->hydrogen()->is_interval_check()) { | 2248 if (instr->hydrogen()->is_interval_check()) { |
2250 InstanceType first, last; | 2249 InstanceType first, last; |
2251 instr->hydrogen()->GetCheckInterval(&first, &last); | 2250 instr->hydrogen()->GetCheckInterval(&first, &last); |
2252 | 2251 |
2253 __ Cmp(scratch, first); | 2252 __ Cmp(scratch, first); |
2254 if (first == last) { | 2253 if (first == last) { |
2255 // If there is only one type in the interval check for equality. | 2254 // If there is only one type in the interval check for equality. |
2256 DeoptimizeIf(ne, instr); | 2255 DeoptimizeIf(ne, instr, "wrong instance type"); |
2257 } else if (last == LAST_TYPE) { | 2256 } else if (last == LAST_TYPE) { |
2258 // We don't need to compare with the higher bound of the interval. | 2257 // We don't need to compare with the higher bound of the interval. |
2259 DeoptimizeIf(lo, instr); | 2258 DeoptimizeIf(lo, instr, "wrong instance type"); |
2260 } else { | 2259 } else { |
2261 // If we are below the lower bound, set the C flag and clear the Z flag | 2260 // If we are below the lower bound, set the C flag and clear the Z flag |
2262 // to force a deopt. | 2261 // to force a deopt. |
2263 __ Ccmp(scratch, last, CFlag, hs); | 2262 __ Ccmp(scratch, last, CFlag, hs); |
2264 DeoptimizeIf(hi, instr); | 2263 DeoptimizeIf(hi, instr, "wrong instance type"); |
2265 } | 2264 } |
2266 } else { | 2265 } else { |
2267 uint8_t mask; | 2266 uint8_t mask; |
2268 uint8_t tag; | 2267 uint8_t tag; |
2269 instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag); | 2268 instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag); |
2270 | 2269 |
2271 if (base::bits::IsPowerOfTwo32(mask)) { | 2270 if (base::bits::IsPowerOfTwo32(mask)) { |
2272 DCHECK((tag == 0) || (tag == mask)); | 2271 DCHECK((tag == 0) || (tag == mask)); |
2273 if (tag == 0) { | 2272 if (tag == 0) { |
2274 DeoptimizeIfBitSet(scratch, MaskToBit(mask), instr); | 2273 DeoptimizeIfBitSet(scratch, MaskToBit(mask), instr, |
| 2274 "wrong instance type"); |
2275 } else { | 2275 } else { |
2276 DeoptimizeIfBitClear(scratch, MaskToBit(mask), instr); | 2276 DeoptimizeIfBitClear(scratch, MaskToBit(mask), instr, |
| 2277 "wrong instance type"); |
2277 } | 2278 } |
2278 } else { | 2279 } else { |
2279 if (tag == 0) { | 2280 if (tag == 0) { |
2280 __ Tst(scratch, mask); | 2281 __ Tst(scratch, mask); |
2281 } else { | 2282 } else { |
2282 __ And(scratch, scratch, mask); | 2283 __ And(scratch, scratch, mask); |
2283 __ Cmp(scratch, tag); | 2284 __ Cmp(scratch, tag); |
2284 } | 2285 } |
2285 DeoptimizeIf(ne, instr); | 2286 DeoptimizeIf(ne, instr, "wrong instance type"); |
2286 } | 2287 } |
2287 } | 2288 } |
2288 } | 2289 } |
2289 | 2290 |
2290 | 2291 |
2291 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { | 2292 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { |
2292 DoubleRegister input = ToDoubleRegister(instr->unclamped()); | 2293 DoubleRegister input = ToDoubleRegister(instr->unclamped()); |
2293 Register result = ToRegister32(instr->result()); | 2294 Register result = ToRegister32(instr->result()); |
2294 __ ClampDoubleToUint8(result, input, double_scratch()); | 2295 __ ClampDoubleToUint8(result, input, double_scratch()); |
2295 } | 2296 } |
(...skipping 18 matching lines...) Expand all Loading... |
2314 __ ClampInt32ToUint8(result); | 2315 __ ClampInt32ToUint8(result); |
2315 __ B(&done); | 2316 __ B(&done); |
2316 | 2317 |
2317 __ Bind(&is_not_smi); | 2318 __ Bind(&is_not_smi); |
2318 | 2319 |
2319 // Check for heap number. | 2320 // Check for heap number. |
2320 Label is_heap_number; | 2321 Label is_heap_number; |
2321 __ JumpIfHeapNumber(input, &is_heap_number); | 2322 __ JumpIfHeapNumber(input, &is_heap_number); |
2322 | 2323 |
2323 // Check for undefined. Undefined is converted to zero for clamping conversion. | 2324 // Check for undefined. Undefined is converted to zero for clamping conversion. |
2324 DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr); | 2325 DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr, |
| 2326 "not a heap number/undefined"); |
2325 __ Mov(result, 0); | 2327 __ Mov(result, 0); |
2326 __ B(&done); | 2328 __ B(&done); |
2327 | 2329 |
2328 // Heap number case. | 2330 // Heap number case. |
2329 __ Bind(&is_heap_number); | 2331 __ Bind(&is_heap_number); |
2330 DoubleRegister dbl_scratch = double_scratch(); | 2332 DoubleRegister dbl_scratch = double_scratch(); |
2331 DoubleRegister dbl_scratch2 = ToDoubleRegister(instr->temp1()); | 2333 DoubleRegister dbl_scratch2 = ToDoubleRegister(instr->temp1()); |
2332 __ Ldr(dbl_scratch, FieldMemOperand(input, HeapNumber::kValueOffset)); | 2334 __ Ldr(dbl_scratch, FieldMemOperand(input, HeapNumber::kValueOffset)); |
2333 __ ClampDoubleToUint8(result, dbl_scratch, dbl_scratch2); | 2335 __ ClampDoubleToUint8(result, dbl_scratch, dbl_scratch2); |
2334 | 2336 |
(...skipping 284 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2619 if (isolate()->heap()->InNewSpace(*object)) { | 2621 if (isolate()->heap()->InNewSpace(*object)) { |
2620 UseScratchRegisterScope temps(masm()); | 2622 UseScratchRegisterScope temps(masm()); |
2621 Register temp = temps.AcquireX(); | 2623 Register temp = temps.AcquireX(); |
2622 Handle<Cell> cell = isolate()->factory()->NewCell(object); | 2624 Handle<Cell> cell = isolate()->factory()->NewCell(object); |
2623 __ Mov(temp, Operand(Handle<Object>(cell))); | 2625 __ Mov(temp, Operand(Handle<Object>(cell))); |
2624 __ Ldr(temp, FieldMemOperand(temp, Cell::kValueOffset)); | 2626 __ Ldr(temp, FieldMemOperand(temp, Cell::kValueOffset)); |
2625 __ Cmp(reg, temp); | 2627 __ Cmp(reg, temp); |
2626 } else { | 2628 } else { |
2627 __ Cmp(reg, Operand(object)); | 2629 __ Cmp(reg, Operand(object)); |
2628 } | 2630 } |
2629 DeoptimizeIf(ne, instr); | 2631 DeoptimizeIf(ne, instr, "value mismatch"); |
2630 } | 2632 } |
2631 | 2633 |
2632 | 2634 |
2633 void LCodeGen::DoLazyBailout(LLazyBailout* instr) { | 2635 void LCodeGen::DoLazyBailout(LLazyBailout* instr) { |
2634 last_lazy_deopt_pc_ = masm()->pc_offset(); | 2636 last_lazy_deopt_pc_ = masm()->pc_offset(); |
2635 DCHECK(instr->HasEnvironment()); | 2637 DCHECK(instr->HasEnvironment()); |
2636 LEnvironment* env = instr->environment(); | 2638 LEnvironment* env = instr->environment(); |
2637 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt); | 2639 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt); |
2638 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index()); | 2640 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index()); |
2639 } | 2641 } |
2640 | 2642 |
2641 | 2643 |
2642 void LCodeGen::DoDateField(LDateField* instr) { | 2644 void LCodeGen::DoDateField(LDateField* instr) { |
2643 Register object = ToRegister(instr->date()); | 2645 Register object = ToRegister(instr->date()); |
2644 Register result = ToRegister(instr->result()); | 2646 Register result = ToRegister(instr->result()); |
2645 Register temp1 = x10; | 2647 Register temp1 = x10; |
2646 Register temp2 = x11; | 2648 Register temp2 = x11; |
2647 Smi* index = instr->index(); | 2649 Smi* index = instr->index(); |
2648 Label runtime, done; | 2650 Label runtime, done; |
2649 | 2651 |
2650 DCHECK(object.is(result) && object.Is(x0)); | 2652 DCHECK(object.is(result) && object.Is(x0)); |
2651 DCHECK(instr->IsMarkedAsCall()); | 2653 DCHECK(instr->IsMarkedAsCall()); |
2652 | 2654 |
2653 DeoptimizeIfSmi(object, instr); | 2655 DeoptimizeIfSmi(object, instr, "Smi"); |
2654 __ CompareObjectType(object, temp1, temp1, JS_DATE_TYPE); | 2656 __ CompareObjectType(object, temp1, temp1, JS_DATE_TYPE); |
2655 DeoptimizeIf(ne, instr); | 2657 DeoptimizeIf(ne, instr, "not a date object"); |
2656 | 2658 |
2657 if (index->value() == 0) { | 2659 if (index->value() == 0) { |
2658 __ Ldr(result, FieldMemOperand(object, JSDate::kValueOffset)); | 2660 __ Ldr(result, FieldMemOperand(object, JSDate::kValueOffset)); |
2659 } else { | 2661 } else { |
2660 if (index->value() < JSDate::kFirstUncachedField) { | 2662 if (index->value() < JSDate::kFirstUncachedField) { |
2661 ExternalReference stamp = ExternalReference::date_cache_stamp(isolate()); | 2663 ExternalReference stamp = ExternalReference::date_cache_stamp(isolate()); |
2662 __ Mov(temp1, Operand(stamp)); | 2664 __ Mov(temp1, Operand(stamp)); |
2663 __ Ldr(temp1, MemOperand(temp1)); | 2665 __ Ldr(temp1, MemOperand(temp1)); |
2664 __ Ldr(temp2, FieldMemOperand(object, JSDate::kCacheStampOffset)); | 2666 __ Ldr(temp2, FieldMemOperand(object, JSDate::kCacheStampOffset)); |
2665 __ Cmp(temp1, temp2); | 2667 __ Cmp(temp1, temp2); |
(...skipping 15 matching lines...) Expand all Loading... |
2681 void LCodeGen::DoDeoptimize(LDeoptimize* instr) { | 2683 void LCodeGen::DoDeoptimize(LDeoptimize* instr) { |
2682 Deoptimizer::BailoutType type = instr->hydrogen()->type(); | 2684 Deoptimizer::BailoutType type = instr->hydrogen()->type(); |
2683 // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the | 2685 // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the |
2684 // needed return address), even though the implementation of LAZY and EAGER is | 2686 // needed return address), even though the implementation of LAZY and EAGER is |
2685 // now identical. When LAZY is eventually completely folded into EAGER, remove | 2687 // now identical. When LAZY is eventually completely folded into EAGER, remove |
2686 // the special case below. | 2688 // the special case below. |
2687 if (info()->IsStub() && (type == Deoptimizer::EAGER)) { | 2689 if (info()->IsStub() && (type == Deoptimizer::EAGER)) { |
2688 type = Deoptimizer::LAZY; | 2690 type = Deoptimizer::LAZY; |
2689 } | 2691 } |
2690 | 2692 |
2691 Deoptimize(instr, &type, instr->hydrogen()->reason()); | 2693 Deoptimize(instr, instr->hydrogen()->reason(), &type); |
2692 } | 2694 } |
2693 | 2695 |
2694 | 2696 |
2695 void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) { | 2697 void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) { |
2696 Register dividend = ToRegister32(instr->dividend()); | 2698 Register dividend = ToRegister32(instr->dividend()); |
2697 int32_t divisor = instr->divisor(); | 2699 int32_t divisor = instr->divisor(); |
2698 Register result = ToRegister32(instr->result()); | 2700 Register result = ToRegister32(instr->result()); |
2699 DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor))); | 2701 DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor))); |
2700 DCHECK(!result.is(dividend)); | 2702 DCHECK(!result.is(dividend)); |
2701 | 2703 |
2702 // Check for (0 / -x) that will produce negative zero. | 2704 // Check for (0 / -x) that will produce negative zero. |
2703 HDiv* hdiv = instr->hydrogen(); | 2705 HDiv* hdiv = instr->hydrogen(); |
2704 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { | 2706 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { |
2705 DeoptimizeIfZero(dividend, instr); | 2707 DeoptimizeIfZero(dividend, instr, "division by zero"); |
2706 } | 2708 } |
2707 // Check for (kMinInt / -1). | 2709 // Check for (kMinInt / -1). |
2708 if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) { | 2710 if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) { |
2709 // Test dividend for kMinInt by subtracting one (cmp) and checking for | 2711 // Test dividend for kMinInt by subtracting one (cmp) and checking for |
2710 // overflow. | 2712 // overflow. |
2711 __ Cmp(dividend, 1); | 2713 __ Cmp(dividend, 1); |
2712 DeoptimizeIf(vs, instr); | 2714 DeoptimizeIf(vs, instr, "overflow"); |
2713 } | 2715 } |
2714 // Deoptimize if remainder will not be 0. | 2716 // Deoptimize if remainder will not be 0. |
2715 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && | 2717 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && |
2716 divisor != 1 && divisor != -1) { | 2718 divisor != 1 && divisor != -1) { |
2717 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1); | 2719 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1); |
2718 __ Tst(dividend, mask); | 2720 __ Tst(dividend, mask); |
2719 DeoptimizeIf(ne, instr); | 2721 DeoptimizeIf(ne, instr, "lost precision"); |
2720 } | 2722 } |
2721 | 2723 |
2722 if (divisor == -1) { // Nice shortcut, not needed for correctness. | 2724 if (divisor == -1) { // Nice shortcut, not needed for correctness. |
2723 __ Neg(result, dividend); | 2725 __ Neg(result, dividend); |
2724 return; | 2726 return; |
2725 } | 2727 } |
2726 int32_t shift = WhichPowerOf2Abs(divisor); | 2728 int32_t shift = WhichPowerOf2Abs(divisor); |
2727 if (shift == 0) { | 2729 if (shift == 0) { |
2728 __ Mov(result, dividend); | 2730 __ Mov(result, dividend); |
2729 } else if (shift == 1) { | 2731 } else if (shift == 1) { |
2730 __ Add(result, dividend, Operand(dividend, LSR, 31)); | 2732 __ Add(result, dividend, Operand(dividend, LSR, 31)); |
2731 } else { | 2733 } else { |
2732 __ Mov(result, Operand(dividend, ASR, 31)); | 2734 __ Mov(result, Operand(dividend, ASR, 31)); |
2733 __ Add(result, dividend, Operand(result, LSR, 32 - shift)); | 2735 __ Add(result, dividend, Operand(result, LSR, 32 - shift)); |
2734 } | 2736 } |
2735 if (shift > 0) __ Mov(result, Operand(result, ASR, shift)); | 2737 if (shift > 0) __ Mov(result, Operand(result, ASR, shift)); |
2736 if (divisor < 0) __ Neg(result, result); | 2738 if (divisor < 0) __ Neg(result, result); |
2737 } | 2739 } |
2738 | 2740 |
2739 | 2741 |
2740 void LCodeGen::DoDivByConstI(LDivByConstI* instr) { | 2742 void LCodeGen::DoDivByConstI(LDivByConstI* instr) { |
2741 Register dividend = ToRegister32(instr->dividend()); | 2743 Register dividend = ToRegister32(instr->dividend()); |
2742 int32_t divisor = instr->divisor(); | 2744 int32_t divisor = instr->divisor(); |
2743 Register result = ToRegister32(instr->result()); | 2745 Register result = ToRegister32(instr->result()); |
2744 DCHECK(!AreAliased(dividend, result)); | 2746 DCHECK(!AreAliased(dividend, result)); |
2745 | 2747 |
2746 if (divisor == 0) { | 2748 if (divisor == 0) { |
2747 Deoptimize(instr); | 2749 Deoptimize(instr, "division by zero"); |
2748 return; | 2750 return; |
2749 } | 2751 } |
2750 | 2752 |
2751 // Check for (0 / -x) that will produce negative zero. | 2753 // Check for (0 / -x) that will produce negative zero. |
2752 HDiv* hdiv = instr->hydrogen(); | 2754 HDiv* hdiv = instr->hydrogen(); |
2753 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { | 2755 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { |
2754 DeoptimizeIfZero(dividend, instr); | 2756 DeoptimizeIfZero(dividend, instr, "minus zero"); |
2755 } | 2757 } |
2756 | 2758 |
2757 __ TruncatingDiv(result, dividend, Abs(divisor)); | 2759 __ TruncatingDiv(result, dividend, Abs(divisor)); |
2758 if (divisor < 0) __ Neg(result, result); | 2760 if (divisor < 0) __ Neg(result, result); |
2759 | 2761 |
2760 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) { | 2762 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) { |
2761 Register temp = ToRegister32(instr->temp()); | 2763 Register temp = ToRegister32(instr->temp()); |
2762 DCHECK(!AreAliased(dividend, result, temp)); | 2764 DCHECK(!AreAliased(dividend, result, temp)); |
2763 __ Sxtw(dividend.X(), dividend); | 2765 __ Sxtw(dividend.X(), dividend); |
2764 __ Mov(temp, divisor); | 2766 __ Mov(temp, divisor); |
2765 __ Smsubl(temp.X(), result, temp, dividend.X()); | 2767 __ Smsubl(temp.X(), result, temp, dividend.X()); |
2766 DeoptimizeIfNotZero(temp, instr); | 2768 DeoptimizeIfNotZero(temp, instr, "lost precision"); |
2767 } | 2769 } |
2768 } | 2770 } |
2769 | 2771 |
2770 | 2772 |
2771 // TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI. | 2773 // TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI. |
2772 void LCodeGen::DoDivI(LDivI* instr) { | 2774 void LCodeGen::DoDivI(LDivI* instr) { |
2773 HBinaryOperation* hdiv = instr->hydrogen(); | 2775 HBinaryOperation* hdiv = instr->hydrogen(); |
2774 Register dividend = ToRegister32(instr->dividend()); | 2776 Register dividend = ToRegister32(instr->dividend()); |
2775 Register divisor = ToRegister32(instr->divisor()); | 2777 Register divisor = ToRegister32(instr->divisor()); |
2776 Register result = ToRegister32(instr->result()); | 2778 Register result = ToRegister32(instr->result()); |
2777 | 2779 |
2778 // Issue the division first, and then check for any deopt cases whilst the | 2780 // Issue the division first, and then check for any deopt cases whilst the |
2779 // result is computed. | 2781 // result is computed. |
2780 __ Sdiv(result, dividend, divisor); | 2782 __ Sdiv(result, dividend, divisor); |
2781 | 2783 |
2782 if (hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { | 2784 if (hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { |
2783 DCHECK_EQ(NULL, instr->temp()); | 2785 DCHECK_EQ(NULL, instr->temp()); |
2784 return; | 2786 return; |
2785 } | 2787 } |
2786 | 2788 |
2787 // Check for x / 0. | 2789 // Check for x / 0. |
2788 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { | 2790 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { |
2789 DeoptimizeIfZero(divisor, instr); | 2791 DeoptimizeIfZero(divisor, instr, "division by zero"); |
2790 } | 2792 } |
2791 | 2793 |
2792 // Check for (0 / -x) as that will produce negative zero. | 2794 // Check for (0 / -x) as that will produce negative zero. |
2793 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { | 2795 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { |
2794 __ Cmp(divisor, 0); | 2796 __ Cmp(divisor, 0); |
2795 | 2797 |
2796 // If the divisor < 0 (mi), compare the dividend, and deopt if it is | 2798 // If the divisor < 0 (mi), compare the dividend, and deopt if it is |
2797 // zero, ie. zero dividend with negative divisor deopts. | 2799 // zero, ie. zero dividend with negative divisor deopts. |
2798 // If the divisor >= 0 (pl, the opposite of mi) set the flags to | 2800 // If the divisor >= 0 (pl, the opposite of mi) set the flags to |
2799 // condition ne, so we don't deopt, ie. positive divisor doesn't deopt. | 2801 // condition ne, so we don't deopt, ie. positive divisor doesn't deopt. |
2800 __ Ccmp(dividend, 0, NoFlag, mi); | 2802 __ Ccmp(dividend, 0, NoFlag, mi); |
2801 DeoptimizeIf(eq, instr); | 2803 DeoptimizeIf(eq, instr, "minus zero"); |
2802 } | 2804 } |
2803 | 2805 |
2804 // Check for (kMinInt / -1). | 2806 // Check for (kMinInt / -1). |
2805 if (hdiv->CheckFlag(HValue::kCanOverflow)) { | 2807 if (hdiv->CheckFlag(HValue::kCanOverflow)) { |
2806 // Test dividend for kMinInt by subtracting one (cmp) and checking for | 2808 // Test dividend for kMinInt by subtracting one (cmp) and checking for |
2807 // overflow. | 2809 // overflow. |
2808 __ Cmp(dividend, 1); | 2810 __ Cmp(dividend, 1); |
2809 // If overflow is set, ie. dividend = kMinInt, compare the divisor with | 2811 // If overflow is set, ie. dividend = kMinInt, compare the divisor with |
2810 // -1. If overflow is clear, set the flags for condition ne, as the | 2812 // -1. If overflow is clear, set the flags for condition ne, as the |
2811 // dividend isn't -1, and thus we shouldn't deopt. | 2813 // dividend isn't -1, and thus we shouldn't deopt. |
2812 __ Ccmp(divisor, -1, NoFlag, vs); | 2814 __ Ccmp(divisor, -1, NoFlag, vs); |
2813 DeoptimizeIf(eq, instr); | 2815 DeoptimizeIf(eq, instr, "overflow"); |
2814 } | 2816 } |
2815 | 2817 |
2816 // Compute remainder and deopt if it's not zero. | 2818 // Compute remainder and deopt if it's not zero. |
2817 Register remainder = ToRegister32(instr->temp()); | 2819 Register remainder = ToRegister32(instr->temp()); |
2818 __ Msub(remainder, result, divisor, dividend); | 2820 __ Msub(remainder, result, divisor, dividend); |
2819 DeoptimizeIfNotZero(remainder, instr); | 2821 DeoptimizeIfNotZero(remainder, instr, "lost precision"); |
2820 } | 2822 } |
2821 | 2823 |
2822 | 2824 |
2823 void LCodeGen::DoDoubleToIntOrSmi(LDoubleToIntOrSmi* instr) { | 2825 void LCodeGen::DoDoubleToIntOrSmi(LDoubleToIntOrSmi* instr) { |
2824 DoubleRegister input = ToDoubleRegister(instr->value()); | 2826 DoubleRegister input = ToDoubleRegister(instr->value()); |
2825 Register result = ToRegister32(instr->result()); | 2827 Register result = ToRegister32(instr->result()); |
2826 | 2828 |
2827 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 2829 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
2828 DeoptimizeIfMinusZero(input, instr); | 2830 DeoptimizeIfMinusZero(input, instr, "minus zero"); |
2829 } | 2831 } |
2830 | 2832 |
2831 __ TryRepresentDoubleAsInt32(result, input, double_scratch()); | 2833 __ TryRepresentDoubleAsInt32(result, input, double_scratch()); |
2832 DeoptimizeIf(ne, instr); | 2834 DeoptimizeIf(ne, instr, "lost precision or NaN"); |
2833 | 2835 |
2834 if (instr->tag_result()) { | 2836 if (instr->tag_result()) { |
2835 __ SmiTag(result.X()); | 2837 __ SmiTag(result.X()); |
2836 } | 2838 } |
2837 } | 2839 } |
2838 | 2840 |
2839 | 2841 |
2840 void LCodeGen::DoDrop(LDrop* instr) { | 2842 void LCodeGen::DoDrop(LDrop* instr) { |
2841 __ Drop(instr->count()); | 2843 __ Drop(instr->count()); |
2842 } | 2844 } |
(...skipping 40 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2883 __ EnumLengthUntagged(result, map); | 2885 __ EnumLengthUntagged(result, map); |
2884 __ Cbnz(result, &load_cache); | 2886 __ Cbnz(result, &load_cache); |
2885 | 2887 |
2886 __ Mov(result, Operand(isolate()->factory()->empty_fixed_array())); | 2888 __ Mov(result, Operand(isolate()->factory()->empty_fixed_array())); |
2887 __ B(&done); | 2889 __ B(&done); |
2888 | 2890 |
2889 __ Bind(&load_cache); | 2891 __ Bind(&load_cache); |
2890 __ LoadInstanceDescriptors(map, result); | 2892 __ LoadInstanceDescriptors(map, result); |
2891 __ Ldr(result, FieldMemOperand(result, DescriptorArray::kEnumCacheOffset)); | 2893 __ Ldr(result, FieldMemOperand(result, DescriptorArray::kEnumCacheOffset)); |
2892 __ Ldr(result, FieldMemOperand(result, FixedArray::SizeFor(instr->idx()))); | 2894 __ Ldr(result, FieldMemOperand(result, FixedArray::SizeFor(instr->idx()))); |
2893 DeoptimizeIfZero(result, instr); | 2895 DeoptimizeIfZero(result, instr, "no cache"); |
2894 | 2896 |
2895 __ Bind(&done); | 2897 __ Bind(&done); |
2896 } | 2898 } |
2897 | 2899 |
2898 | 2900 |
2899 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) { | 2901 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) { |
2900 Register object = ToRegister(instr->object()); | 2902 Register object = ToRegister(instr->object()); |
2901 Register null_value = x5; | 2903 Register null_value = x5; |
2902 | 2904 |
2903 DCHECK(instr->IsMarkedAsCall()); | 2905 DCHECK(instr->IsMarkedAsCall()); |
2904 DCHECK(object.Is(x0)); | 2906 DCHECK(object.Is(x0)); |
2905 | 2907 |
2906 DeoptimizeIfRoot(object, Heap::kUndefinedValueRootIndex, instr); | 2908 DeoptimizeIfRoot(object, Heap::kUndefinedValueRootIndex, instr, "undefined"); |
2907 | 2909 |
2908 __ LoadRoot(null_value, Heap::kNullValueRootIndex); | 2910 __ LoadRoot(null_value, Heap::kNullValueRootIndex); |
2909 __ Cmp(object, null_value); | 2911 __ Cmp(object, null_value); |
2910 DeoptimizeIf(eq, instr); | 2912 DeoptimizeIf(eq, instr, "null"); |
2911 | 2913 |
2912 DeoptimizeIfSmi(object, instr); | 2914 DeoptimizeIfSmi(object, instr, "Smi"); |
2913 | 2915 |
2914 STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE); | 2916 STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE); |
2915 __ CompareObjectType(object, x1, x1, LAST_JS_PROXY_TYPE); | 2917 __ CompareObjectType(object, x1, x1, LAST_JS_PROXY_TYPE); |
2916 DeoptimizeIf(le, instr); | 2918 DeoptimizeIf(le, instr, "not a JavaScript object"); |
2917 | 2919 |
2918 Label use_cache, call_runtime; | 2920 Label use_cache, call_runtime; |
2919 __ CheckEnumCache(object, null_value, x1, x2, x3, x4, &call_runtime); | 2921 __ CheckEnumCache(object, null_value, x1, x2, x3, x4, &call_runtime); |
2920 | 2922 |
2921 __ Ldr(object, FieldMemOperand(object, HeapObject::kMapOffset)); | 2923 __ Ldr(object, FieldMemOperand(object, HeapObject::kMapOffset)); |
2922 __ B(&use_cache); | 2924 __ B(&use_cache); |
2923 | 2925 |
2924 // Get the set of properties to enumerate. | 2926 // Get the set of properties to enumerate. |
2925 __ Bind(&call_runtime); | 2927 __ Bind(&call_runtime); |
2926 __ Push(object); | 2928 __ Push(object); |
2927 CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr); | 2929 CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr); |
2928 | 2930 |
2929 __ Ldr(x1, FieldMemOperand(object, HeapObject::kMapOffset)); | 2931 __ Ldr(x1, FieldMemOperand(object, HeapObject::kMapOffset)); |
2930 DeoptimizeIfNotRoot(x1, Heap::kMetaMapRootIndex, instr); | 2932 DeoptimizeIfNotRoot(x1, Heap::kMetaMapRootIndex, instr, "wrong map"); |
2931 | 2933 |
2932 __ Bind(&use_cache); | 2934 __ Bind(&use_cache); |
2933 } | 2935 } |
2934 | 2936 |
2935 | 2937 |
2936 void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) { | 2938 void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) { |
2937 Register input = ToRegister(instr->value()); | 2939 Register input = ToRegister(instr->value()); |
2938 Register result = ToRegister(instr->result()); | 2940 Register result = ToRegister(instr->result()); |
2939 | 2941 |
2940 __ AssertString(input); | 2942 __ AssertString(input); |
(...skipping 372 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
3313 DoGap(label); | 3315 DoGap(label); |
3314 } | 3316 } |
3315 | 3317 |
3316 | 3318 |
3317 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) { | 3319 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) { |
3318 Register context = ToRegister(instr->context()); | 3320 Register context = ToRegister(instr->context()); |
3319 Register result = ToRegister(instr->result()); | 3321 Register result = ToRegister(instr->result()); |
3320 __ Ldr(result, ContextMemOperand(context, instr->slot_index())); | 3322 __ Ldr(result, ContextMemOperand(context, instr->slot_index())); |
3321 if (instr->hydrogen()->RequiresHoleCheck()) { | 3323 if (instr->hydrogen()->RequiresHoleCheck()) { |
3322 if (instr->hydrogen()->DeoptimizesOnHole()) { | 3324 if (instr->hydrogen()->DeoptimizesOnHole()) { |
3323 DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr); | 3325 DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr, "hole"); |
3324 } else { | 3326 } else { |
3325 Label not_the_hole; | 3327 Label not_the_hole; |
3326 __ JumpIfNotRoot(result, Heap::kTheHoleValueRootIndex, ¬_the_hole); | 3328 __ JumpIfNotRoot(result, Heap::kTheHoleValueRootIndex, ¬_the_hole); |
3327 __ LoadRoot(result, Heap::kUndefinedValueRootIndex); | 3329 __ LoadRoot(result, Heap::kUndefinedValueRootIndex); |
3328 __ Bind(¬_the_hole); | 3330 __ Bind(¬_the_hole); |
3329 } | 3331 } |
3330 } | 3332 } |
3331 } | 3333 } |
3332 | 3334 |
3333 | 3335 |
3334 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) { | 3336 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) { |
3335 Register function = ToRegister(instr->function()); | 3337 Register function = ToRegister(instr->function()); |
3336 Register result = ToRegister(instr->result()); | 3338 Register result = ToRegister(instr->result()); |
3337 Register temp = ToRegister(instr->temp()); | 3339 Register temp = ToRegister(instr->temp()); |
3338 | 3340 |
3339 // Get the prototype or initial map from the function. | 3341 // Get the prototype or initial map from the function. |
3340 __ Ldr(result, FieldMemOperand(function, | 3342 __ Ldr(result, FieldMemOperand(function, |
3341 JSFunction::kPrototypeOrInitialMapOffset)); | 3343 JSFunction::kPrototypeOrInitialMapOffset)); |
3342 | 3344 |
3343 // Check that the function has a prototype or an initial map. | 3345 // Check that the function has a prototype or an initial map. |
3344 DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr); | 3346 DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr, "hole"); |
3345 | 3347 |
3346 // If the function does not have an initial map, we're done. | 3348 // If the function does not have an initial map, we're done. |
3347 Label done; | 3349 Label done; |
3348 __ CompareObjectType(result, temp, temp, MAP_TYPE); | 3350 __ CompareObjectType(result, temp, temp, MAP_TYPE); |
3349 __ B(ne, &done); | 3351 __ B(ne, &done); |
3350 | 3352 |
3351 // Get the prototype from the initial map. | 3353 // Get the prototype from the initial map. |
3352 __ Ldr(result, FieldMemOperand(result, Map::kPrototypeOffset)); | 3354 __ Ldr(result, FieldMemOperand(result, Map::kPrototypeOffset)); |
3353 | 3355 |
3354 // All done. | 3356 // All done. |
3355 __ Bind(&done); | 3357 __ Bind(&done); |
3356 } | 3358 } |
3357 | 3359 |
3358 | 3360 |
3359 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) { | 3361 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) { |
3360 Register result = ToRegister(instr->result()); | 3362 Register result = ToRegister(instr->result()); |
3361 __ Mov(result, Operand(Handle<Object>(instr->hydrogen()->cell().handle()))); | 3363 __ Mov(result, Operand(Handle<Object>(instr->hydrogen()->cell().handle()))); |
3362 __ Ldr(result, FieldMemOperand(result, Cell::kValueOffset)); | 3364 __ Ldr(result, FieldMemOperand(result, Cell::kValueOffset)); |
3363 if (instr->hydrogen()->RequiresHoleCheck()) { | 3365 if (instr->hydrogen()->RequiresHoleCheck()) { |
3364 DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr); | 3366 DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr, "hole"); |
3365 } | 3367 } |
3366 } | 3368 } |
3367 | 3369 |
3368 | 3370 |
3369 template <class T> | 3371 template <class T> |
3370 void LCodeGen::EmitVectorLoadICRegisters(T* instr) { | 3372 void LCodeGen::EmitVectorLoadICRegisters(T* instr) { |
3371 DCHECK(FLAG_vector_ics); | 3373 DCHECK(FLAG_vector_ics); |
3372 Register vector = ToRegister(instr->temp_vector()); | 3374 Register vector = ToRegister(instr->temp_vector()); |
3373 DCHECK(vector.is(VectorLoadICDescriptor::VectorRegister())); | 3375 DCHECK(vector.is(VectorLoadICDescriptor::VectorRegister())); |
3374 __ Mov(vector, instr->hydrogen()->feedback_vector()); | 3376 __ Mov(vector, instr->hydrogen()->feedback_vector()); |
(...skipping 110 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
3485 case EXTERNAL_INT32_ELEMENTS: | 3487 case EXTERNAL_INT32_ELEMENTS: |
3486 case INT32_ELEMENTS: | 3488 case INT32_ELEMENTS: |
3487 __ Ldrsw(result, mem_op); | 3489 __ Ldrsw(result, mem_op); |
3488 break; | 3490 break; |
3489 case EXTERNAL_UINT32_ELEMENTS: | 3491 case EXTERNAL_UINT32_ELEMENTS: |
3490 case UINT32_ELEMENTS: | 3492 case UINT32_ELEMENTS: |
3491 __ Ldr(result.W(), mem_op); | 3493 __ Ldr(result.W(), mem_op); |
3492 if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) { | 3494 if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) { |
3493 // Deopt if value > 0x80000000. | 3495 // Deopt if value > 0x80000000. |
3494 __ Tst(result, 0xFFFFFFFF80000000); | 3496 __ Tst(result, 0xFFFFFFFF80000000); |
3495 DeoptimizeIf(ne, instr); | 3497 DeoptimizeIf(ne, instr, "negative value"); |
3496 } | 3498 } |
3497 break; | 3499 break; |
3498 case FLOAT32_ELEMENTS: | 3500 case FLOAT32_ELEMENTS: |
3499 case FLOAT64_ELEMENTS: | 3501 case FLOAT64_ELEMENTS: |
3500 case EXTERNAL_FLOAT32_ELEMENTS: | 3502 case EXTERNAL_FLOAT32_ELEMENTS: |
3501 case EXTERNAL_FLOAT64_ELEMENTS: | 3503 case EXTERNAL_FLOAT64_ELEMENTS: |
3502 case FAST_HOLEY_DOUBLE_ELEMENTS: | 3504 case FAST_HOLEY_DOUBLE_ELEMENTS: |
3503 case FAST_HOLEY_ELEMENTS: | 3505 case FAST_HOLEY_ELEMENTS: |
3504 case FAST_HOLEY_SMI_ELEMENTS: | 3506 case FAST_HOLEY_SMI_ELEMENTS: |
3505 case FAST_DOUBLE_ELEMENTS: | 3507 case FAST_DOUBLE_ELEMENTS: |
(...skipping 76 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
3582 | 3584 |
3583 __ Ldr(result, mem_op); | 3585 __ Ldr(result, mem_op); |
3584 | 3586 |
3585 if (instr->hydrogen()->RequiresHoleCheck()) { | 3587 if (instr->hydrogen()->RequiresHoleCheck()) { |
3586 Register scratch = ToRegister(instr->temp()); | 3588 Register scratch = ToRegister(instr->temp()); |
3587 // Detect the hole NaN by adding one to the integer representation of the | 3589 // Detect the hole NaN by adding one to the integer representation of the |
3588 // result, and checking for overflow. | 3590 // result, and checking for overflow. |
3589 STATIC_ASSERT(kHoleNanInt64 == 0x7fffffffffffffff); | 3591 STATIC_ASSERT(kHoleNanInt64 == 0x7fffffffffffffff); |
3590 __ Ldr(scratch, mem_op); | 3592 __ Ldr(scratch, mem_op); |
3591 __ Cmn(scratch, 1); | 3593 __ Cmn(scratch, 1); |
3592 DeoptimizeIf(vs, instr); | 3594 DeoptimizeIf(vs, instr, "hole"); |
3593 } | 3595 } |
3594 } | 3596 } |
3595 | 3597 |
3596 | 3598 |
3597 void LCodeGen::DoLoadKeyedFixed(LLoadKeyedFixed* instr) { | 3599 void LCodeGen::DoLoadKeyedFixed(LLoadKeyedFixed* instr) { |
3598 Register elements = ToRegister(instr->elements()); | 3600 Register elements = ToRegister(instr->elements()); |
3599 Register result = ToRegister(instr->result()); | 3601 Register result = ToRegister(instr->result()); |
3600 MemOperand mem_op; | 3602 MemOperand mem_op; |
3601 | 3603 |
3602 Representation representation = instr->hydrogen()->representation(); | 3604 Representation representation = instr->hydrogen()->representation(); |
(...skipping 17 matching lines...) Expand all Loading... |
3620 | 3622 |
3621 mem_op = PrepareKeyedArrayOperand(load_base, elements, key, key_is_tagged, | 3623 mem_op = PrepareKeyedArrayOperand(load_base, elements, key, key_is_tagged, |
3622 instr->hydrogen()->elements_kind(), | 3624 instr->hydrogen()->elements_kind(), |
3623 representation, instr->base_offset()); | 3625 representation, instr->base_offset()); |
3624 } | 3626 } |
3625 | 3627 |
3626 __ Load(result, mem_op, representation); | 3628 __ Load(result, mem_op, representation); |
3627 | 3629 |
3628 if (instr->hydrogen()->RequiresHoleCheck()) { | 3630 if (instr->hydrogen()->RequiresHoleCheck()) { |
3629 if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) { | 3631 if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) { |
3630 DeoptimizeIfNotSmi(result, instr); | 3632 DeoptimizeIfNotSmi(result, instr, "not a Smi"); |
3631 } else { | 3633 } else { |
3632 DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr); | 3634 DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr, "hole"); |
3633 } | 3635 } |
3634 } | 3636 } |
3635 } | 3637 } |
3636 | 3638 |
3637 | 3639 |
3638 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) { | 3640 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) { |
3639 DCHECK(ToRegister(instr->context()).is(cp)); | 3641 DCHECK(ToRegister(instr->context()).is(cp)); |
3640 DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister())); | 3642 DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister())); |
3641 DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister())); | 3643 DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister())); |
3642 if (FLAG_vector_ics) { | 3644 if (FLAG_vector_ics) { |
(...skipping 81 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
3724 if (r.IsDouble()) { | 3726 if (r.IsDouble()) { |
3725 DoubleRegister input = ToDoubleRegister(instr->value()); | 3727 DoubleRegister input = ToDoubleRegister(instr->value()); |
3726 DoubleRegister result = ToDoubleRegister(instr->result()); | 3728 DoubleRegister result = ToDoubleRegister(instr->result()); |
3727 __ Fabs(result, input); | 3729 __ Fabs(result, input); |
3728 } else if (r.IsSmi() || r.IsInteger32()) { | 3730 } else if (r.IsSmi() || r.IsInteger32()) { |
3729 Register input = r.IsSmi() ? ToRegister(instr->value()) | 3731 Register input = r.IsSmi() ? ToRegister(instr->value()) |
3730 : ToRegister32(instr->value()); | 3732 : ToRegister32(instr->value()); |
3731 Register result = r.IsSmi() ? ToRegister(instr->result()) | 3733 Register result = r.IsSmi() ? ToRegister(instr->result()) |
3732 : ToRegister32(instr->result()); | 3734 : ToRegister32(instr->result()); |
3733 __ Abs(result, input); | 3735 __ Abs(result, input); |
3734 DeoptimizeIf(vs, instr); | 3736 DeoptimizeIf(vs, instr, "overflow"); |
3735 } | 3737 } |
3736 } | 3738 } |
3737 | 3739 |
3738 | 3740 |
3739 void LCodeGen::DoDeferredMathAbsTagged(LMathAbsTagged* instr, | 3741 void LCodeGen::DoDeferredMathAbsTagged(LMathAbsTagged* instr, |
3740 Label* exit, | 3742 Label* exit, |
3741 Label* allocation_entry) { | 3743 Label* allocation_entry) { |
3742 // Handle the tricky cases of MathAbsTagged: | 3744 // Handle the tricky cases of MathAbsTagged: |
3743 // - HeapNumber inputs. | 3745 // - HeapNumber inputs. |
3744 // - Negative inputs produce a positive result, so a new HeapNumber is | 3746 // - Negative inputs produce a positive result, so a new HeapNumber is |
(...skipping 131 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
3876 | 3878 |
3877 __ Frintm(result, input); | 3879 __ Frintm(result, input); |
3878 } | 3880 } |
3879 | 3881 |
3880 | 3882 |
3881 void LCodeGen::DoMathFloorI(LMathFloorI* instr) { | 3883 void LCodeGen::DoMathFloorI(LMathFloorI* instr) { |
3882 DoubleRegister input = ToDoubleRegister(instr->value()); | 3884 DoubleRegister input = ToDoubleRegister(instr->value()); |
3883 Register result = ToRegister(instr->result()); | 3885 Register result = ToRegister(instr->result()); |
3884 | 3886 |
3885 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 3887 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
3886 DeoptimizeIfMinusZero(input, instr); | 3888 DeoptimizeIfMinusZero(input, instr, "minus zero"); |
3887 } | 3889 } |
3888 | 3890 |
3889 __ Fcvtms(result, input); | 3891 __ Fcvtms(result, input); |
3890 | 3892 |
3891 // Check that the result fits into a 32-bit integer. | 3893 // Check that the result fits into a 32-bit integer. |
3892 // - The result did not overflow. | 3894 // - The result did not overflow. |
3893 __ Cmp(result, Operand(result, SXTW)); | 3895 __ Cmp(result, Operand(result, SXTW)); |
3894 // - The input was not NaN. | 3896 // - The input was not NaN. |
3895 __ Fccmp(input, input, NoFlag, eq); | 3897 __ Fccmp(input, input, NoFlag, eq); |
3896 DeoptimizeIf(ne, instr); | 3898 DeoptimizeIf(ne, instr, "lost precision or NaN"); |
3897 } | 3899 } |
3898 | 3900 |
3899 | 3901 |
3900 void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) { | 3902 void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) { |
3901 Register dividend = ToRegister32(instr->dividend()); | 3903 Register dividend = ToRegister32(instr->dividend()); |
3902 Register result = ToRegister32(instr->result()); | 3904 Register result = ToRegister32(instr->result()); |
3903 int32_t divisor = instr->divisor(); | 3905 int32_t divisor = instr->divisor(); |
3904 | 3906 |
3905 // If the divisor is 1, return the dividend. | 3907 // If the divisor is 1, return the dividend. |
3906 if (divisor == 1) { | 3908 if (divisor == 1) { |
3907 __ Mov(result, dividend, kDiscardForSameWReg); | 3909 __ Mov(result, dividend, kDiscardForSameWReg); |
3908 return; | 3910 return; |
3909 } | 3911 } |
3910 | 3912 |
3911 // If the divisor is positive, things are easy: There can be no deopts and we | 3913 // If the divisor is positive, things are easy: There can be no deopts and we |
3912 // can simply do an arithmetic right shift. | 3914 // can simply do an arithmetic right shift. |
3913 int32_t shift = WhichPowerOf2Abs(divisor); | 3915 int32_t shift = WhichPowerOf2Abs(divisor); |
3914 if (divisor > 1) { | 3916 if (divisor > 1) { |
3915 __ Mov(result, Operand(dividend, ASR, shift)); | 3917 __ Mov(result, Operand(dividend, ASR, shift)); |
3916 return; | 3918 return; |
3917 } | 3919 } |
3918 | 3920 |
3919 // If the divisor is negative, we have to negate and handle edge cases. | 3921 // If the divisor is negative, we have to negate and handle edge cases. |
3920 __ Negs(result, dividend); | 3922 __ Negs(result, dividend); |
3921 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 3923 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
3922 DeoptimizeIf(eq, instr); | 3924 DeoptimizeIf(eq, instr, "minus zero"); |
3923 } | 3925 } |
3924 | 3926 |
3925 // Dividing by -1 is basically negation, unless we overflow. | 3927 // Dividing by -1 is basically negation, unless we overflow. |
3926 if (divisor == -1) { | 3928 if (divisor == -1) { |
3927 if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { | 3929 if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { |
3928 DeoptimizeIf(vs, instr); | 3930 DeoptimizeIf(vs, instr, "overflow"); |
3929 } | 3931 } |
3930 return; | 3932 return; |
3931 } | 3933 } |
3932 | 3934 |
3933 // If the negation could not overflow, simply shifting is OK. | 3935 // If the negation could not overflow, simply shifting is OK. |
3934 if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { | 3936 if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { |
3935 __ Mov(result, Operand(dividend, ASR, shift)); | 3937 __ Mov(result, Operand(dividend, ASR, shift)); |
3936 return; | 3938 return; |
3937 } | 3939 } |
3938 | 3940 |
3939 __ Asr(result, result, shift); | 3941 __ Asr(result, result, shift); |
3940 __ Csel(result, result, kMinInt / divisor, vc); | 3942 __ Csel(result, result, kMinInt / divisor, vc); |
3941 } | 3943 } |
3942 | 3944 |
3943 | 3945 |
3944 void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) { | 3946 void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) { |
3945 Register dividend = ToRegister32(instr->dividend()); | 3947 Register dividend = ToRegister32(instr->dividend()); |
3946 int32_t divisor = instr->divisor(); | 3948 int32_t divisor = instr->divisor(); |
3947 Register result = ToRegister32(instr->result()); | 3949 Register result = ToRegister32(instr->result()); |
3948 DCHECK(!AreAliased(dividend, result)); | 3950 DCHECK(!AreAliased(dividend, result)); |
3949 | 3951 |
3950 if (divisor == 0) { | 3952 if (divisor == 0) { |
3951 Deoptimize(instr); | 3953 Deoptimize(instr, "division by zero"); |
3952 return; | 3954 return; |
3953 } | 3955 } |
3954 | 3956 |
3955 // Check for (0 / -x) that will produce negative zero. | 3957 // Check for (0 / -x) that will produce negative zero. |
3956 HMathFloorOfDiv* hdiv = instr->hydrogen(); | 3958 HMathFloorOfDiv* hdiv = instr->hydrogen(); |
3957 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { | 3959 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { |
3958 DeoptimizeIfZero(dividend, instr); | 3960 DeoptimizeIfZero(dividend, instr, "minus zero"); |
3959 } | 3961 } |
3960 | 3962 |
3961 // Easy case: We need no dynamic check for the dividend and the flooring | 3963 // Easy case: We need no dynamic check for the dividend and the flooring |
3962 // division is the same as the truncating division. | 3964 // division is the same as the truncating division. |
3963 if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) || | 3965 if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) || |
3964 (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) { | 3966 (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) { |
3965 __ TruncatingDiv(result, dividend, Abs(divisor)); | 3967 __ TruncatingDiv(result, dividend, Abs(divisor)); |
3966 if (divisor < 0) __ Neg(result, result); | 3968 if (divisor < 0) __ Neg(result, result); |
3967 return; | 3969 return; |
3968 } | 3970 } |
(...skipping 22 matching lines...) Expand all Loading... |
3991 Register dividend = ToRegister32(instr->dividend()); | 3993 Register dividend = ToRegister32(instr->dividend()); |
3992 Register divisor = ToRegister32(instr->divisor()); | 3994 Register divisor = ToRegister32(instr->divisor()); |
3993 Register remainder = ToRegister32(instr->temp()); | 3995 Register remainder = ToRegister32(instr->temp()); |
3994 Register result = ToRegister32(instr->result()); | 3996 Register result = ToRegister32(instr->result()); |
3995 | 3997 |
3996 // This can't cause an exception on ARM, so we can speculatively | 3998 // This can't cause an exception on ARM, so we can speculatively |
3997 // execute it already now. | 3999 // execute it already now. |
3998 __ Sdiv(result, dividend, divisor); | 4000 __ Sdiv(result, dividend, divisor); |
3999 | 4001 |
4000 // Check for x / 0. | 4002 // Check for x / 0. |
4001 DeoptimizeIfZero(divisor, instr); | 4003 DeoptimizeIfZero(divisor, instr, "division by zero"); |
4002 | 4004 |
4003 // Check for (kMinInt / -1). | 4005 // Check for (kMinInt / -1). |
4004 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) { | 4006 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) { |
4005 // The V flag will be set iff dividend == kMinInt. | 4007 // The V flag will be set iff dividend == kMinInt. |
4006 __ Cmp(dividend, 1); | 4008 __ Cmp(dividend, 1); |
4007 __ Ccmp(divisor, -1, NoFlag, vs); | 4009 __ Ccmp(divisor, -1, NoFlag, vs); |
4008 DeoptimizeIf(eq, instr); | 4010 DeoptimizeIf(eq, instr, "overflow"); |
4009 } | 4011 } |
4010 | 4012 |
4011 // Check for (0 / -x) that will produce negative zero. | 4013 // Check for (0 / -x) that will produce negative zero. |
4012 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 4014 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
4013 __ Cmp(divisor, 0); | 4015 __ Cmp(divisor, 0); |
4014 __ Ccmp(dividend, 0, ZFlag, mi); | 4016 __ Ccmp(dividend, 0, ZFlag, mi); |
4015 // "divisor" can't be null because the code would have already been | 4017 // "divisor" can't be null because the code would have already been |
4016 // deoptimized. The Z flag is set only if (divisor < 0) and (dividend == 0). | 4018 // deoptimized. The Z flag is set only if (divisor < 0) and (dividend == 0). |
4017 // In this case we need to deoptimize to produce a -0. | 4019 // In this case we need to deoptimize to produce a -0. |
4018 DeoptimizeIf(eq, instr); | 4020 DeoptimizeIf(eq, instr, "minus zero"); |
4019 } | 4021 } |
4020 | 4022 |
4021 Label done; | 4023 Label done; |
4022 // If both operands have the same sign then we are done. | 4024 // If both operands have the same sign then we are done. |
4023 __ Eor(remainder, dividend, divisor); | 4025 __ Eor(remainder, dividend, divisor); |
4024 __ Tbz(remainder, kWSignBit, &done); | 4026 __ Tbz(remainder, kWSignBit, &done); |
4025 | 4027 |
4026 // Check if the result needs to be corrected. | 4028 // Check if the result needs to be corrected. |
4027 __ Msub(remainder, result, divisor, dividend); | 4029 __ Msub(remainder, result, divisor, dividend); |
4028 __ Cbz(remainder, &done); | 4030 __ Cbz(remainder, &done); |
(...skipping 138 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
4167 // result fits in 32 bits. | 4169 // result fits in 32 bits. |
4168 __ Cmp(result, Operand(result.W(), SXTW)); | 4170 __ Cmp(result, Operand(result.W(), SXTW)); |
4169 __ Ccmp(result, 1, ZFlag, eq); | 4171 __ Ccmp(result, 1, ZFlag, eq); |
4170 __ B(hi, &done); | 4172 __ B(hi, &done); |
4171 | 4173 |
4172 // At this point, we have to handle possible inputs of NaN or numbers in the | 4174 // At this point, we have to handle possible inputs of NaN or numbers in the |
4173 // range [-0.5, 1.5[, or numbers larger than 32 bits. | 4175 // range [-0.5, 1.5[, or numbers larger than 32 bits. |
4174 | 4176 |
4175 // Deoptimize if the result > 1, as it must be larger than 32 bits. | 4177 // Deoptimize if the result > 1, as it must be larger than 32 bits. |
4176 __ Cmp(result, 1); | 4178 __ Cmp(result, 1); |
4177 DeoptimizeIf(hi, instr); | 4179 DeoptimizeIf(hi, instr, "overflow"); |
4178 | 4180 |
4179 // Deoptimize for negative inputs, which at this point are only numbers in | 4181 // Deoptimize for negative inputs, which at this point are only numbers in |
4180 // the range [-0.5, -0.0] | 4182 // the range [-0.5, -0.0] |
4181 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 4183 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
4182 __ Fmov(result, input); | 4184 __ Fmov(result, input); |
4183 DeoptimizeIfNegative(result, instr); | 4185 DeoptimizeIfNegative(result, instr, "minus zero"); |
4184 } | 4186 } |
4185 | 4187 |
4186 // Deoptimize if the input was NaN. | 4188 // Deoptimize if the input was NaN. |
4187 __ Fcmp(input, dot_five); | 4189 __ Fcmp(input, dot_five); |
4188 DeoptimizeIf(vs, instr); | 4190 DeoptimizeIf(vs, instr, "NaN"); |
4189 | 4191 |
4190 // Now, the only unhandled inputs are in the range [0.0, 1.5[ (or [-0.5, 1.5[ | 4192 // Now, the only unhandled inputs are in the range [0.0, 1.5[ (or [-0.5, 1.5[ |
4191 // if we didn't generate a -0.0 bailout). If input >= 0.5 then return 1, | 4193 // if we didn't generate a -0.0 bailout). If input >= 0.5 then return 1, |
4192 // else 0; we avoid dealing with 0.499...94 directly. | 4194 // else 0; we avoid dealing with 0.499...94 directly. |
4193 __ Cset(result, ge); | 4195 __ Cset(result, ge); |
4194 __ Bind(&done); | 4196 __ Bind(&done); |
4195 } | 4197 } |
4196 | 4198 |
4197 | 4199 |
4198 void LCodeGen::DoMathFround(LMathFround* instr) { | 4200 void LCodeGen::DoMathFround(LMathFround* instr) { |
(...skipping 57 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
4256 HMod* hmod = instr->hydrogen(); | 4258 HMod* hmod = instr->hydrogen(); |
4257 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1); | 4259 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1); |
4258 Label dividend_is_not_negative, done; | 4260 Label dividend_is_not_negative, done; |
4259 if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) { | 4261 if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) { |
4260 __ Tbz(dividend, kWSignBit, &dividend_is_not_negative); | 4262 __ Tbz(dividend, kWSignBit, &dividend_is_not_negative); |
4261 // Note that this is correct even for kMinInt operands. | 4263 // Note that this is correct even for kMinInt operands. |
4262 __ Neg(dividend, dividend); | 4264 __ Neg(dividend, dividend); |
4263 __ And(dividend, dividend, mask); | 4265 __ And(dividend, dividend, mask); |
4264 __ Negs(dividend, dividend); | 4266 __ Negs(dividend, dividend); |
4265 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { | 4267 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { |
4266 DeoptimizeIf(eq, instr); | 4268 DeoptimizeIf(eq, instr, "minus zero"); |
4267 } | 4269 } |
4268 __ B(&done); | 4270 __ B(&done); |
4269 } | 4271 } |
4270 | 4272 |
4271 __ bind(&dividend_is_not_negative); | 4273 __ bind(&dividend_is_not_negative); |
4272 __ And(dividend, dividend, mask); | 4274 __ And(dividend, dividend, mask); |
4273 __ bind(&done); | 4275 __ bind(&done); |
4274 } | 4276 } |
4275 | 4277 |
4276 | 4278 |
4277 void LCodeGen::DoModByConstI(LModByConstI* instr) { | 4279 void LCodeGen::DoModByConstI(LModByConstI* instr) { |
4278 Register dividend = ToRegister32(instr->dividend()); | 4280 Register dividend = ToRegister32(instr->dividend()); |
4279 int32_t divisor = instr->divisor(); | 4281 int32_t divisor = instr->divisor(); |
4280 Register result = ToRegister32(instr->result()); | 4282 Register result = ToRegister32(instr->result()); |
4281 Register temp = ToRegister32(instr->temp()); | 4283 Register temp = ToRegister32(instr->temp()); |
4282 DCHECK(!AreAliased(dividend, result, temp)); | 4284 DCHECK(!AreAliased(dividend, result, temp)); |
4283 | 4285 |
4284 if (divisor == 0) { | 4286 if (divisor == 0) { |
4285 Deoptimize(instr); | 4287 Deoptimize(instr, "division by zero"); |
4286 return; | 4288 return; |
4287 } | 4289 } |
4288 | 4290 |
4289 __ TruncatingDiv(result, dividend, Abs(divisor)); | 4291 __ TruncatingDiv(result, dividend, Abs(divisor)); |
4290 __ Sxtw(dividend.X(), dividend); | 4292 __ Sxtw(dividend.X(), dividend); |
4291 __ Mov(temp, Abs(divisor)); | 4293 __ Mov(temp, Abs(divisor)); |
4292 __ Smsubl(result.X(), result, temp, dividend.X()); | 4294 __ Smsubl(result.X(), result, temp, dividend.X()); |
4293 | 4295 |
4294 // Check for negative zero. | 4296 // Check for negative zero. |
4295 HMod* hmod = instr->hydrogen(); | 4297 HMod* hmod = instr->hydrogen(); |
4296 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { | 4298 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { |
4297 Label remainder_not_zero; | 4299 Label remainder_not_zero; |
4298 __ Cbnz(result, &remainder_not_zero); | 4300 __ Cbnz(result, &remainder_not_zero); |
4299 DeoptimizeIfNegative(dividend, instr); | 4301 DeoptimizeIfNegative(dividend, instr, "minus zero"); |
4300 __ bind(&remainder_not_zero); | 4302 __ bind(&remainder_not_zero); |
4301 } | 4303 } |
4302 } | 4304 } |
4303 | 4305 |
4304 | 4306 |
4305 void LCodeGen::DoModI(LModI* instr) { | 4307 void LCodeGen::DoModI(LModI* instr) { |
4306 Register dividend = ToRegister32(instr->left()); | 4308 Register dividend = ToRegister32(instr->left()); |
4307 Register divisor = ToRegister32(instr->right()); | 4309 Register divisor = ToRegister32(instr->right()); |
4308 Register result = ToRegister32(instr->result()); | 4310 Register result = ToRegister32(instr->result()); |
4309 | 4311 |
4310 Label done; | 4312 Label done; |
4311 // modulo = dividend - quotient * divisor | 4313 // modulo = dividend - quotient * divisor |
4312 __ Sdiv(result, dividend, divisor); | 4314 __ Sdiv(result, dividend, divisor); |
4313 if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) { | 4315 if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) { |
4314 DeoptimizeIfZero(divisor, instr); | 4316 DeoptimizeIfZero(divisor, instr, "division by zero"); |
4315 } | 4317 } |
4316 __ Msub(result, result, divisor, dividend); | 4318 __ Msub(result, result, divisor, dividend); |
4317 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 4319 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
4318 __ Cbnz(result, &done); | 4320 __ Cbnz(result, &done); |
4319 DeoptimizeIfNegative(dividend, instr); | 4321 DeoptimizeIfNegative(dividend, instr, "minus zero"); |
4320 } | 4322 } |
4321 __ Bind(&done); | 4323 __ Bind(&done); |
4322 } | 4324 } |
4323 | 4325 |
4324 | 4326 |
4325 void LCodeGen::DoMulConstIS(LMulConstIS* instr) { | 4327 void LCodeGen::DoMulConstIS(LMulConstIS* instr) { |
4326 DCHECK(instr->hydrogen()->representation().IsSmiOrInteger32()); | 4328 DCHECK(instr->hydrogen()->representation().IsSmiOrInteger32()); |
4327 bool is_smi = instr->hydrogen()->representation().IsSmi(); | 4329 bool is_smi = instr->hydrogen()->representation().IsSmi(); |
4328 Register result = | 4330 Register result = |
4329 is_smi ? ToRegister(instr->result()) : ToRegister32(instr->result()); | 4331 is_smi ? ToRegister(instr->result()) : ToRegister32(instr->result()); |
4330 Register left = | 4332 Register left = |
4331 is_smi ? ToRegister(instr->left()) : ToRegister32(instr->left()) ; | 4333 is_smi ? ToRegister(instr->left()) : ToRegister32(instr->left()) ; |
4332 int32_t right = ToInteger32(instr->right()); | 4334 int32_t right = ToInteger32(instr->right()); |
4333 DCHECK((right > -kMaxInt) || (right < kMaxInt)); | 4335 DCHECK((right > -kMaxInt) || (right < kMaxInt)); |
4334 | 4336 |
4335 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); | 4337 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); |
4336 bool bailout_on_minus_zero = | 4338 bool bailout_on_minus_zero = |
4337 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero); | 4339 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero); |
4338 | 4340 |
4339 if (bailout_on_minus_zero) { | 4341 if (bailout_on_minus_zero) { |
4340 if (right < 0) { | 4342 if (right < 0) { |
4341 // The result is -0 if right is negative and left is zero. | 4343 // The result is -0 if right is negative and left is zero. |
4342 DeoptimizeIfZero(left, instr); | 4344 DeoptimizeIfZero(left, instr, "minus zero"); |
4343 } else if (right == 0) { | 4345 } else if (right == 0) { |
4344 // The result is -0 if the right is zero and the left is negative. | 4346 // The result is -0 if the right is zero and the left is negative. |
4345 DeoptimizeIfNegative(left, instr); | 4347 DeoptimizeIfNegative(left, instr, "minus zero"); |
4346 } | 4348 } |
4347 } | 4349 } |
4348 | 4350 |
4349 switch (right) { | 4351 switch (right) { |
4350 // Cases which can detect overflow. | 4352 // Cases which can detect overflow. |
4351 case -1: | 4353 case -1: |
4352 if (can_overflow) { | 4354 if (can_overflow) { |
4353 // Only 0x80000000 can overflow here. | 4355 // Only 0x80000000 can overflow here. |
4354 __ Negs(result, left); | 4356 __ Negs(result, left); |
4355 DeoptimizeIf(vs, instr); | 4357 DeoptimizeIf(vs, instr, "overflow"); |
4356 } else { | 4358 } else { |
4357 __ Neg(result, left); | 4359 __ Neg(result, left); |
4358 } | 4360 } |
4359 break; | 4361 break; |
4360 case 0: | 4362 case 0: |
4361 // This case can never overflow. | 4363 // This case can never overflow. |
4362 __ Mov(result, 0); | 4364 __ Mov(result, 0); |
4363 break; | 4365 break; |
4364 case 1: | 4366 case 1: |
4365 // This case can never overflow. | 4367 // This case can never overflow. |
4366 __ Mov(result, left, kDiscardForSameWReg); | 4368 __ Mov(result, left, kDiscardForSameWReg); |
4367 break; | 4369 break; |
4368 case 2: | 4370 case 2: |
4369 if (can_overflow) { | 4371 if (can_overflow) { |
4370 __ Adds(result, left, left); | 4372 __ Adds(result, left, left); |
4371 DeoptimizeIf(vs, instr); | 4373 DeoptimizeIf(vs, instr, "overflow"); |
4372 } else { | 4374 } else { |
4373 __ Add(result, left, left); | 4375 __ Add(result, left, left); |
4374 } | 4376 } |
4375 break; | 4377 break; |
4376 | 4378 |
4377 default: | 4379 default: |
4378 // Multiplication by constant powers of two (and some related values) | 4380 // Multiplication by constant powers of two (and some related values) |
4379 // can be done efficiently with shifted operands. | 4381 // can be done efficiently with shifted operands. |
4380 int32_t right_abs = Abs(right); | 4382 int32_t right_abs = Abs(right); |
4381 | 4383 |
4382 if (base::bits::IsPowerOfTwo32(right_abs)) { | 4384 if (base::bits::IsPowerOfTwo32(right_abs)) { |
4383 int right_log2 = WhichPowerOf2(right_abs); | 4385 int right_log2 = WhichPowerOf2(right_abs); |
4384 | 4386 |
4385 if (can_overflow) { | 4387 if (can_overflow) { |
4386 Register scratch = result; | 4388 Register scratch = result; |
4387 DCHECK(!AreAliased(scratch, left)); | 4389 DCHECK(!AreAliased(scratch, left)); |
4388 __ Cls(scratch, left); | 4390 __ Cls(scratch, left); |
4389 __ Cmp(scratch, right_log2); | 4391 __ Cmp(scratch, right_log2); |
4390 DeoptimizeIf(lt, instr); | 4392 DeoptimizeIf(lt, instr, "overflow"); |
4391 } | 4393 } |
4392 | 4394 |
4393 if (right >= 0) { | 4395 if (right >= 0) { |
4394 // result = left << log2(right) | 4396 // result = left << log2(right) |
4395 __ Lsl(result, left, right_log2); | 4397 __ Lsl(result, left, right_log2); |
4396 } else { | 4398 } else { |
4397 // result = -left << log2(-right) | 4399 // result = -left << log2(-right) |
4398 if (can_overflow) { | 4400 if (can_overflow) { |
4399 __ Negs(result, Operand(left, LSL, right_log2)); | 4401 __ Negs(result, Operand(left, LSL, right_log2)); |
4400 DeoptimizeIf(vs, instr); | 4402 DeoptimizeIf(vs, instr, "overflow"); |
4401 } else { | 4403 } else { |
4402 __ Neg(result, Operand(left, LSL, right_log2)); | 4404 __ Neg(result, Operand(left, LSL, right_log2)); |
4403 } | 4405 } |
4404 } | 4406 } |
4405 return; | 4407 return; |
4406 } | 4408 } |
4407 | 4409 |
4408 | 4410 |
4409 // For the following cases, we could perform a conservative overflow check | 4411 // For the following cases, we could perform a conservative overflow check |
4410 // with CLS as above. However the few cycles saved are likely not worth | 4412 // with CLS as above. However the few cycles saved are likely not worth |
(...skipping 37 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
4448 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero); | 4450 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero); |
4449 | 4451 |
4450 if (bailout_on_minus_zero && !left.Is(right)) { | 4452 if (bailout_on_minus_zero && !left.Is(right)) { |
4451 // If one operand is zero and the other is negative, the result is -0. | 4453 // If one operand is zero and the other is negative, the result is -0. |
4452 // - Set Z (eq) if either left or right, or both, are 0. | 4454 // - Set Z (eq) if either left or right, or both, are 0. |
4453 __ Cmp(left, 0); | 4455 __ Cmp(left, 0); |
4454 __ Ccmp(right, 0, ZFlag, ne); | 4456 __ Ccmp(right, 0, ZFlag, ne); |
4455 // - If so (eq), set N (mi) if left + right is negative. | 4457 // - If so (eq), set N (mi) if left + right is negative. |
4456 // - Otherwise, clear N. | 4458 // - Otherwise, clear N. |
4457 __ Ccmn(left, right, NoFlag, eq); | 4459 __ Ccmn(left, right, NoFlag, eq); |
4458 DeoptimizeIf(mi, instr); | 4460 DeoptimizeIf(mi, instr, "minus zero"); |
4459 } | 4461 } |
4460 | 4462 |
4461 if (can_overflow) { | 4463 if (can_overflow) { |
4462 __ Smull(result.X(), left, right); | 4464 __ Smull(result.X(), left, right); |
4463 __ Cmp(result.X(), Operand(result, SXTW)); | 4465 __ Cmp(result.X(), Operand(result, SXTW)); |
4464 DeoptimizeIf(ne, instr); | 4466 DeoptimizeIf(ne, instr, "overflow"); |
4465 } else { | 4467 } else { |
4466 __ Mul(result, left, right); | 4468 __ Mul(result, left, right); |
4467 } | 4469 } |
4468 } | 4470 } |
4469 | 4471 |
4470 | 4472 |
4471 void LCodeGen::DoMulS(LMulS* instr) { | 4473 void LCodeGen::DoMulS(LMulS* instr) { |
4472 Register result = ToRegister(instr->result()); | 4474 Register result = ToRegister(instr->result()); |
4473 Register left = ToRegister(instr->left()); | 4475 Register left = ToRegister(instr->left()); |
4474 Register right = ToRegister(instr->right()); | 4476 Register right = ToRegister(instr->right()); |
4475 | 4477 |
4476 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); | 4478 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); |
4477 bool bailout_on_minus_zero = | 4479 bool bailout_on_minus_zero = |
4478 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero); | 4480 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero); |
4479 | 4481 |
4480 if (bailout_on_minus_zero && !left.Is(right)) { | 4482 if (bailout_on_minus_zero && !left.Is(right)) { |
4481 // If one operand is zero and the other is negative, the result is -0. | 4483 // If one operand is zero and the other is negative, the result is -0. |
4482 // - Set Z (eq) if either left or right, or both, are 0. | 4484 // - Set Z (eq) if either left or right, or both, are 0. |
4483 __ Cmp(left, 0); | 4485 __ Cmp(left, 0); |
4484 __ Ccmp(right, 0, ZFlag, ne); | 4486 __ Ccmp(right, 0, ZFlag, ne); |
4485 // - If so (eq), set N (mi) if left + right is negative. | 4487 // - If so (eq), set N (mi) if left + right is negative. |
4486 // - Otherwise, clear N. | 4488 // - Otherwise, clear N. |
4487 __ Ccmn(left, right, NoFlag, eq); | 4489 __ Ccmn(left, right, NoFlag, eq); |
4488 DeoptimizeIf(mi, instr); | 4490 DeoptimizeIf(mi, instr, "minus zero"); |
4489 } | 4491 } |
4490 | 4492 |
4491 STATIC_ASSERT((kSmiShift == 32) && (kSmiTag == 0)); | 4493 STATIC_ASSERT((kSmiShift == 32) && (kSmiTag == 0)); |
4492 if (can_overflow) { | 4494 if (can_overflow) { |
4493 __ Smulh(result, left, right); | 4495 __ Smulh(result, left, right); |
4494 __ Cmp(result, Operand(result.W(), SXTW)); | 4496 __ Cmp(result, Operand(result.W(), SXTW)); |
4495 __ SmiTag(result); | 4497 __ SmiTag(result); |
4496 DeoptimizeIf(ne, instr); | 4498 DeoptimizeIf(ne, instr, "overflow"); |
4497 } else { | 4499 } else { |
4498 if (AreAliased(result, left, right)) { | 4500 if (AreAliased(result, left, right)) { |
4499 // All three registers are the same: half untag the input and then | 4501 // All three registers are the same: half untag the input and then |
4500 // multiply, giving a tagged result. | 4502 // multiply, giving a tagged result. |
4501 STATIC_ASSERT((kSmiShift % 2) == 0); | 4503 STATIC_ASSERT((kSmiShift % 2) == 0); |
4502 __ Asr(result, left, kSmiShift / 2); | 4504 __ Asr(result, left, kSmiShift / 2); |
4503 __ Mul(result, result, result); | 4505 __ Mul(result, result, result); |
4504 } else if (result.Is(left) && !left.Is(right)) { | 4506 } else if (result.Is(left) && !left.Is(right)) { |
4505 // Registers result and left alias, right is distinct: untag left into | 4507 // Registers result and left alias, right is distinct: untag left into |
4506 // result, and then multiply by right, giving a tagged result. | 4508 // result, and then multiply by right, giving a tagged result. |
(...skipping 155 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
4662 // Heap number map check. | 4664 // Heap number map check. |
4663 if (can_convert_undefined_to_nan) { | 4665 if (can_convert_undefined_to_nan) { |
4664 __ JumpIfNotHeapNumber(input, &convert_undefined); | 4666 __ JumpIfNotHeapNumber(input, &convert_undefined); |
4665 } else { | 4667 } else { |
4666 DeoptimizeIfNotHeapNumber(input, instr); | 4668 DeoptimizeIfNotHeapNumber(input, instr); |
4667 } | 4669 } |
4668 | 4670 |
4669 // Load heap number. | 4671 // Load heap number. |
4670 __ Ldr(result, FieldMemOperand(input, HeapNumber::kValueOffset)); | 4672 __ Ldr(result, FieldMemOperand(input, HeapNumber::kValueOffset)); |
4671 if (instr->hydrogen()->deoptimize_on_minus_zero()) { | 4673 if (instr->hydrogen()->deoptimize_on_minus_zero()) { |
4672 DeoptimizeIfMinusZero(result, instr); | 4674 DeoptimizeIfMinusZero(result, instr, "minus zero"); |
4673 } | 4675 } |
4674 __ B(&done); | 4676 __ B(&done); |
4675 | 4677 |
4676 if (can_convert_undefined_to_nan) { | 4678 if (can_convert_undefined_to_nan) { |
4677 __ Bind(&convert_undefined); | 4679 __ Bind(&convert_undefined); |
4678 DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr); | 4680 DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr, |
| 4681 "not a heap number/undefined"); |
4679 | 4682 |
4680 __ LoadRoot(scratch, Heap::kNanValueRootIndex); | 4683 __ LoadRoot(scratch, Heap::kNanValueRootIndex); |
4681 __ Ldr(result, FieldMemOperand(scratch, HeapNumber::kValueOffset)); | 4684 __ Ldr(result, FieldMemOperand(scratch, HeapNumber::kValueOffset)); |
4682 __ B(&done); | 4685 __ B(&done); |
4683 } | 4686 } |
4684 | 4687 |
4685 } else { | 4688 } else { |
4686 DCHECK(mode == NUMBER_CANDIDATE_IS_SMI); | 4689 DCHECK(mode == NUMBER_CANDIDATE_IS_SMI); |
4687 // Fall through to load_smi. | 4690 // Fall through to load_smi. |
4688 } | 4691 } |
(...skipping 172 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
4861 } | 4864 } |
4862 } | 4865 } |
4863 | 4866 |
4864 | 4867 |
4865 void LCodeGen::DoSmiTag(LSmiTag* instr) { | 4868 void LCodeGen::DoSmiTag(LSmiTag* instr) { |
4866 HChange* hchange = instr->hydrogen(); | 4869 HChange* hchange = instr->hydrogen(); |
4867 Register input = ToRegister(instr->value()); | 4870 Register input = ToRegister(instr->value()); |
4868 Register output = ToRegister(instr->result()); | 4871 Register output = ToRegister(instr->result()); |
4869 if (hchange->CheckFlag(HValue::kCanOverflow) && | 4872 if (hchange->CheckFlag(HValue::kCanOverflow) && |
4870 hchange->value()->CheckFlag(HValue::kUint32)) { | 4873 hchange->value()->CheckFlag(HValue::kUint32)) { |
4871 DeoptimizeIfNegative(input.W(), instr); | 4874 DeoptimizeIfNegative(input.W(), instr, "overflow"); |
4872 } | 4875 } |
4873 __ SmiTag(output, input); | 4876 __ SmiTag(output, input); |
4874 } | 4877 } |
4875 | 4878 |
4876 | 4879 |
4877 void LCodeGen::DoSmiUntag(LSmiUntag* instr) { | 4880 void LCodeGen::DoSmiUntag(LSmiUntag* instr) { |
4878 Register input = ToRegister(instr->value()); | 4881 Register input = ToRegister(instr->value()); |
4879 Register result = ToRegister(instr->result()); | 4882 Register result = ToRegister(instr->result()); |
4880 Label done, untag; | 4883 Label done, untag; |
4881 | 4884 |
4882 if (instr->needs_check()) { | 4885 if (instr->needs_check()) { |
4883 DeoptimizeIfNotSmi(input, instr); | 4886 DeoptimizeIfNotSmi(input, instr, "not a Smi"); |
4884 } | 4887 } |
4885 | 4888 |
4886 __ Bind(&untag); | 4889 __ Bind(&untag); |
4887 __ SmiUntag(result, input); | 4890 __ SmiUntag(result, input); |
4888 __ Bind(&done); | 4891 __ Bind(&done); |
4889 } | 4892 } |
4890 | 4893 |
4891 | 4894 |
4892 void LCodeGen::DoShiftI(LShiftI* instr) { | 4895 void LCodeGen::DoShiftI(LShiftI* instr) { |
4893 LOperand* right_op = instr->right(); | 4896 LOperand* right_op = instr->right(); |
4894 Register left = ToRegister32(instr->left()); | 4897 Register left = ToRegister32(instr->left()); |
4895 Register result = ToRegister32(instr->result()); | 4898 Register result = ToRegister32(instr->result()); |
4896 | 4899 |
4897 if (right_op->IsRegister()) { | 4900 if (right_op->IsRegister()) { |
4898 Register right = ToRegister32(instr->right()); | 4901 Register right = ToRegister32(instr->right()); |
4899 switch (instr->op()) { | 4902 switch (instr->op()) { |
4900 case Token::ROR: __ Ror(result, left, right); break; | 4903 case Token::ROR: __ Ror(result, left, right); break; |
4901 case Token::SAR: __ Asr(result, left, right); break; | 4904 case Token::SAR: __ Asr(result, left, right); break; |
4902 case Token::SHL: __ Lsl(result, left, right); break; | 4905 case Token::SHL: __ Lsl(result, left, right); break; |
4903 case Token::SHR: | 4906 case Token::SHR: |
4904 __ Lsr(result, left, right); | 4907 __ Lsr(result, left, right); |
4905 if (instr->can_deopt()) { | 4908 if (instr->can_deopt()) { |
4906 // If `left >>> right` >= 0x80000000, the result is not representable | 4909 // If `left >>> right` >= 0x80000000, the result is not representable |
4907 // in a signed 32-bit smi. | 4910 // in a signed 32-bit smi. |
4908 DeoptimizeIfNegative(result, instr); | 4911 DeoptimizeIfNegative(result, instr, "negative value"); |
4909 } | 4912 } |
4910 break; | 4913 break; |
4911 default: UNREACHABLE(); | 4914 default: UNREACHABLE(); |
4912 } | 4915 } |
4913 } else { | 4916 } else { |
4914 DCHECK(right_op->IsConstantOperand()); | 4917 DCHECK(right_op->IsConstantOperand()); |
4915 int shift_count = JSShiftAmountFromLConstant(right_op); | 4918 int shift_count = JSShiftAmountFromLConstant(right_op); |
4916 if (shift_count == 0) { | 4919 if (shift_count == 0) { |
4917 if ((instr->op() == Token::SHR) && instr->can_deopt()) { | 4920 if ((instr->op() == Token::SHR) && instr->can_deopt()) { |
4918 DeoptimizeIfNegative(left, instr); | 4921 DeoptimizeIfNegative(left, instr, "negative value"); |
4919 } | 4922 } |
4920 __ Mov(result, left, kDiscardForSameWReg); | 4923 __ Mov(result, left, kDiscardForSameWReg); |
4921 } else { | 4924 } else { |
4922 switch (instr->op()) { | 4925 switch (instr->op()) { |
4923 case Token::ROR: __ Ror(result, left, shift_count); break; | 4926 case Token::ROR: __ Ror(result, left, shift_count); break; |
4924 case Token::SAR: __ Asr(result, left, shift_count); break; | 4927 case Token::SAR: __ Asr(result, left, shift_count); break; |
4925 case Token::SHL: __ Lsl(result, left, shift_count); break; | 4928 case Token::SHL: __ Lsl(result, left, shift_count); break; |
4926 case Token::SHR: __ Lsr(result, left, shift_count); break; | 4929 case Token::SHR: __ Lsr(result, left, shift_count); break; |
4927 default: UNREACHABLE(); | 4930 default: UNREACHABLE(); |
4928 } | 4931 } |
(...skipping 32 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
4961 break; | 4964 break; |
4962 case Token::SHL: | 4965 case Token::SHL: |
4963 __ Lsl(result, left, result); | 4966 __ Lsl(result, left, result); |
4964 break; | 4967 break; |
4965 case Token::SHR: | 4968 case Token::SHR: |
4966 __ Lsr(result, left, result); | 4969 __ Lsr(result, left, result); |
4967 __ Bic(result, result, kSmiShiftMask); | 4970 __ Bic(result, result, kSmiShiftMask); |
4968 if (instr->can_deopt()) { | 4971 if (instr->can_deopt()) { |
4969 // If `left >>> right` >= 0x80000000, the result is not representable | 4972 // If `left >>> right` >= 0x80000000, the result is not representable |
4970 // in a signed 32-bit smi. | 4973 // in a signed 32-bit smi. |
4971 DeoptimizeIfNegative(result, instr); | 4974 DeoptimizeIfNegative(result, instr, "negative value"); |
4972 } | 4975 } |
4973 break; | 4976 break; |
4974 default: UNREACHABLE(); | 4977 default: UNREACHABLE(); |
4975 } | 4978 } |
4976 } else { | 4979 } else { |
4977 DCHECK(right_op->IsConstantOperand()); | 4980 DCHECK(right_op->IsConstantOperand()); |
4978 int shift_count = JSShiftAmountFromLConstant(right_op); | 4981 int shift_count = JSShiftAmountFromLConstant(right_op); |
4979 if (shift_count == 0) { | 4982 if (shift_count == 0) { |
4980 if ((instr->op() == Token::SHR) && instr->can_deopt()) { | 4983 if ((instr->op() == Token::SHR) && instr->can_deopt()) { |
4981 DeoptimizeIfNegative(left, instr); | 4984 DeoptimizeIfNegative(left, instr, "negative value"); |
4982 } | 4985 } |
4983 __ Mov(result, left); | 4986 __ Mov(result, left); |
4984 } else { | 4987 } else { |
4985 switch (instr->op()) { | 4988 switch (instr->op()) { |
4986 case Token::ROR: | 4989 case Token::ROR: |
4987 __ SmiUntag(result, left); | 4990 __ SmiUntag(result, left); |
4988 __ Ror(result.W(), result.W(), shift_count); | 4991 __ Ror(result.W(), result.W(), shift_count); |
4989 __ SmiTag(result); | 4992 __ SmiTag(result); |
4990 break; | 4993 break; |
4991 case Token::SAR: | 4994 case Token::SAR: |
(...skipping 108 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
5100 Register context = ToRegister(instr->context()); | 5103 Register context = ToRegister(instr->context()); |
5101 Register value = ToRegister(instr->value()); | 5104 Register value = ToRegister(instr->value()); |
5102 Register scratch = ToRegister(instr->temp()); | 5105 Register scratch = ToRegister(instr->temp()); |
5103 MemOperand target = ContextMemOperand(context, instr->slot_index()); | 5106 MemOperand target = ContextMemOperand(context, instr->slot_index()); |
5104 | 5107 |
5105 Label skip_assignment; | 5108 Label skip_assignment; |
5106 | 5109 |
5107 if (instr->hydrogen()->RequiresHoleCheck()) { | 5110 if (instr->hydrogen()->RequiresHoleCheck()) { |
5108 __ Ldr(scratch, target); | 5111 __ Ldr(scratch, target); |
5109 if (instr->hydrogen()->DeoptimizesOnHole()) { | 5112 if (instr->hydrogen()->DeoptimizesOnHole()) { |
5110 DeoptimizeIfRoot(scratch, Heap::kTheHoleValueRootIndex, instr); | 5113 DeoptimizeIfRoot(scratch, Heap::kTheHoleValueRootIndex, instr, "hole"); |
5111 } else { | 5114 } else { |
5112 __ JumpIfNotRoot(scratch, Heap::kTheHoleValueRootIndex, &skip_assignment); | 5115 __ JumpIfNotRoot(scratch, Heap::kTheHoleValueRootIndex, &skip_assignment); |
5113 } | 5116 } |
5114 } | 5117 } |
5115 | 5118 |
5116 __ Str(value, target); | 5119 __ Str(value, target); |
5117 if (instr->hydrogen()->NeedsWriteBarrier()) { | 5120 if (instr->hydrogen()->NeedsWriteBarrier()) { |
5118 SmiCheck check_needed = | 5121 SmiCheck check_needed = |
5119 instr->hydrogen()->value()->type().IsHeapObject() | 5122 instr->hydrogen()->value()->type().IsHeapObject() |
5120 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; | 5123 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; |
(...skipping 17 matching lines...) Expand all Loading... |
5138 // Load the cell. | 5141 // Load the cell. |
5139 __ Mov(cell, Operand(instr->hydrogen()->cell().handle())); | 5142 __ Mov(cell, Operand(instr->hydrogen()->cell().handle())); |
5140 | 5143 |
5141 // If the cell we are storing to contains the hole it could have | 5144 // If the cell we are storing to contains the hole it could have |
5142 // been deleted from the property dictionary. In that case, we need | 5145 // been deleted from the property dictionary. In that case, we need |
5143 // to update the property details in the property dictionary to mark | 5146 // to update the property details in the property dictionary to mark |
5144 // it as no longer deleted. We deoptimize in that case. | 5147 // it as no longer deleted. We deoptimize in that case. |
5145 if (instr->hydrogen()->RequiresHoleCheck()) { | 5148 if (instr->hydrogen()->RequiresHoleCheck()) { |
5146 Register payload = ToRegister(instr->temp2()); | 5149 Register payload = ToRegister(instr->temp2()); |
5147 __ Ldr(payload, FieldMemOperand(cell, Cell::kValueOffset)); | 5150 __ Ldr(payload, FieldMemOperand(cell, Cell::kValueOffset)); |
5148 DeoptimizeIfRoot(payload, Heap::kTheHoleValueRootIndex, instr); | 5151 DeoptimizeIfRoot(payload, Heap::kTheHoleValueRootIndex, instr, "hole"); |
5149 } | 5152 } |
5150 | 5153 |
5151 // Store the value. | 5154 // Store the value. |
5152 __ Str(value, FieldMemOperand(cell, Cell::kValueOffset)); | 5155 __ Str(value, FieldMemOperand(cell, Cell::kValueOffset)); |
5153 // Cells are always rescanned, so no write barrier here. | 5156 // Cells are always rescanned, so no write barrier here. |
5154 } | 5157 } |
5155 | 5158 |
5156 | 5159 |
5157 void LCodeGen::DoStoreKeyedExternal(LStoreKeyedExternal* instr) { | 5160 void LCodeGen::DoStoreKeyedExternal(LStoreKeyedExternal* instr) { |
5158 Register ext_ptr = ToRegister(instr->elements()); | 5161 Register ext_ptr = ToRegister(instr->elements()); |
(...skipping 396 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
5555 | 5558 |
5556 | 5559 |
5557 void LCodeGen::DoSubI(LSubI* instr) { | 5560 void LCodeGen::DoSubI(LSubI* instr) { |
5558 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); | 5561 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); |
5559 Register result = ToRegister32(instr->result()); | 5562 Register result = ToRegister32(instr->result()); |
5560 Register left = ToRegister32(instr->left()); | 5563 Register left = ToRegister32(instr->left()); |
5561 Operand right = ToShiftedRightOperand32(instr->right(), instr); | 5564 Operand right = ToShiftedRightOperand32(instr->right(), instr); |
5562 | 5565 |
5563 if (can_overflow) { | 5566 if (can_overflow) { |
5564 __ Subs(result, left, right); | 5567 __ Subs(result, left, right); |
5565 DeoptimizeIf(vs, instr); | 5568 DeoptimizeIf(vs, instr, "overflow"); |
5566 } else { | 5569 } else { |
5567 __ Sub(result, left, right); | 5570 __ Sub(result, left, right); |
5568 } | 5571 } |
5569 } | 5572 } |
5570 | 5573 |
5571 | 5574 |
5572 void LCodeGen::DoSubS(LSubS* instr) { | 5575 void LCodeGen::DoSubS(LSubS* instr) { |
5573 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); | 5576 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); |
5574 Register result = ToRegister(instr->result()); | 5577 Register result = ToRegister(instr->result()); |
5575 Register left = ToRegister(instr->left()); | 5578 Register left = ToRegister(instr->left()); |
5576 Operand right = ToOperand(instr->right()); | 5579 Operand right = ToOperand(instr->right()); |
5577 if (can_overflow) { | 5580 if (can_overflow) { |
5578 __ Subs(result, left, right); | 5581 __ Subs(result, left, right); |
5579 DeoptimizeIf(vs, instr); | 5582 DeoptimizeIf(vs, instr, "overflow"); |
5580 } else { | 5583 } else { |
5581 __ Sub(result, left, right); | 5584 __ Sub(result, left, right); |
5582 } | 5585 } |
5583 } | 5586 } |
5584 | 5587 |
5585 | 5588 |
5586 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, | 5589 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, |
5587 LOperand* value, | 5590 LOperand* value, |
5588 LOperand* temp1, | 5591 LOperand* temp1, |
5589 LOperand* temp2) { | 5592 LOperand* temp2) { |
(...skipping 19 matching lines...) Expand all Loading... |
5609 Register true_root = output; | 5612 Register true_root = output; |
5610 Register false_root = scratch1; | 5613 Register false_root = scratch1; |
5611 __ LoadTrueFalseRoots(true_root, false_root); | 5614 __ LoadTrueFalseRoots(true_root, false_root); |
5612 __ Cmp(input, true_root); | 5615 __ Cmp(input, true_root); |
5613 __ Cset(output, eq); | 5616 __ Cset(output, eq); |
5614 __ Ccmp(input, false_root, ZFlag, ne); | 5617 __ Ccmp(input, false_root, ZFlag, ne); |
5615 __ B(eq, &done); | 5618 __ B(eq, &done); |
5616 | 5619 |
5617 // Output contains zero, undefined is converted to zero for truncating | 5620 // Output contains zero, undefined is converted to zero for truncating |
5618 // conversions. | 5621 // conversions. |
5619 DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr); | 5622 DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr, |
| 5623 "not a heap number/undefined/true/false"); |
5620 } else { | 5624 } else { |
5621 Register output = ToRegister32(instr->result()); | 5625 Register output = ToRegister32(instr->result()); |
5622 DoubleRegister dbl_scratch2 = ToDoubleRegister(temp2); | 5626 DoubleRegister dbl_scratch2 = ToDoubleRegister(temp2); |
5623 | 5627 |
5624 DeoptimizeIfNotHeapNumber(input, instr); | 5628 DeoptimizeIfNotHeapNumber(input, instr); |
5625 | 5629 |
5626 // A heap number: load value and convert to int32 using non-truncating | 5630 // A heap number: load value and convert to int32 using non-truncating |
5627 // function. If the result is out of range, branch to deoptimize. | 5631 // function. If the result is out of range, branch to deoptimize. |
5628 __ Ldr(dbl_scratch1, FieldMemOperand(input, HeapNumber::kValueOffset)); | 5632 __ Ldr(dbl_scratch1, FieldMemOperand(input, HeapNumber::kValueOffset)); |
5629 __ TryRepresentDoubleAsInt32(output, dbl_scratch1, dbl_scratch2); | 5633 __ TryRepresentDoubleAsInt32(output, dbl_scratch1, dbl_scratch2); |
(...skipping 137 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
5767 } | 5771 } |
5768 | 5772 |
5769 | 5773 |
5770 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) { | 5774 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) { |
5771 Register object = ToRegister(instr->object()); | 5775 Register object = ToRegister(instr->object()); |
5772 Register temp1 = ToRegister(instr->temp1()); | 5776 Register temp1 = ToRegister(instr->temp1()); |
5773 Register temp2 = ToRegister(instr->temp2()); | 5777 Register temp2 = ToRegister(instr->temp2()); |
5774 | 5778 |
5775 Label no_memento_found; | 5779 Label no_memento_found; |
5776 __ TestJSArrayForAllocationMemento(object, temp1, temp2, &no_memento_found); | 5780 __ TestJSArrayForAllocationMemento(object, temp1, temp2, &no_memento_found); |
5777 DeoptimizeIf(eq, instr); | 5781 DeoptimizeIf(eq, instr, "memento found"); |
5778 __ Bind(&no_memento_found); | 5782 __ Bind(&no_memento_found); |
5779 } | 5783 } |
5780 | 5784 |
5781 | 5785 |
5782 void LCodeGen::DoTruncateDoubleToIntOrSmi(LTruncateDoubleToIntOrSmi* instr) { | 5786 void LCodeGen::DoTruncateDoubleToIntOrSmi(LTruncateDoubleToIntOrSmi* instr) { |
5783 DoubleRegister input = ToDoubleRegister(instr->value()); | 5787 DoubleRegister input = ToDoubleRegister(instr->value()); |
5784 Register result = ToRegister(instr->result()); | 5788 Register result = ToRegister(instr->result()); |
5785 __ TruncateDoubleToI(result, input); | 5789 __ TruncateDoubleToI(result, input); |
5786 if (instr->tag_result()) { | 5790 if (instr->tag_result()) { |
5787 __ SmiTag(result, result); | 5791 __ SmiTag(result, result); |
(...skipping 104 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
5892 __ Ucvtf(ToDoubleRegister(instr->result()), ToRegister32(instr->value())); | 5896 __ Ucvtf(ToDoubleRegister(instr->result()), ToRegister32(instr->value())); |
5893 } | 5897 } |
5894 | 5898 |
5895 | 5899 |
5896 void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) { | 5900 void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) { |
5897 Register object = ToRegister(instr->value()); | 5901 Register object = ToRegister(instr->value()); |
5898 Register map = ToRegister(instr->map()); | 5902 Register map = ToRegister(instr->map()); |
5899 Register temp = ToRegister(instr->temp()); | 5903 Register temp = ToRegister(instr->temp()); |
5900 __ Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset)); | 5904 __ Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset)); |
5901 __ Cmp(map, temp); | 5905 __ Cmp(map, temp); |
5902 DeoptimizeIf(ne, instr); | 5906 DeoptimizeIf(ne, instr, "wrong map"); |
5903 } | 5907 } |
5904 | 5908 |
5905 | 5909 |
5906 void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) { | 5910 void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) { |
5907 Register receiver = ToRegister(instr->receiver()); | 5911 Register receiver = ToRegister(instr->receiver()); |
5908 Register function = ToRegister(instr->function()); | 5912 Register function = ToRegister(instr->function()); |
5909 Register result = ToRegister(instr->result()); | 5913 Register result = ToRegister(instr->result()); |
5910 | 5914 |
5911 // If the receiver is null or undefined, we have to pass the global object as | 5915 // If the receiver is null or undefined, we have to pass the global object as |
5912 // a receiver to normal functions. Values have to be passed unchanged to | 5916 // a receiver to normal functions. Values have to be passed unchanged to |
(...skipping 13 matching lines...) Expand all Loading... |
5926 | 5930 |
5927 // Do not transform the receiver to object for builtins. | 5931 // Do not transform the receiver to object for builtins. |
5928 __ Tbnz(result, SharedFunctionInfo::kNative, ©_receiver); | 5932 __ Tbnz(result, SharedFunctionInfo::kNative, ©_receiver); |
5929 } | 5933 } |
5930 | 5934 |
5931 // Normal function. Replace undefined or null with global receiver. | 5935 // Normal function. Replace undefined or null with global receiver. |
5932 __ JumpIfRoot(receiver, Heap::kNullValueRootIndex, &global_object); | 5936 __ JumpIfRoot(receiver, Heap::kNullValueRootIndex, &global_object); |
5933 __ JumpIfRoot(receiver, Heap::kUndefinedValueRootIndex, &global_object); | 5937 __ JumpIfRoot(receiver, Heap::kUndefinedValueRootIndex, &global_object); |
5934 | 5938 |
5935 // Deoptimize if the receiver is not a JS object. | 5939 // Deoptimize if the receiver is not a JS object. |
5936 DeoptimizeIfSmi(receiver, instr); | 5940 DeoptimizeIfSmi(receiver, instr, "Smi"); |
5937 __ CompareObjectType(receiver, result, result, FIRST_SPEC_OBJECT_TYPE); | 5941 __ CompareObjectType(receiver, result, result, FIRST_SPEC_OBJECT_TYPE); |
5938 __ B(ge, ©_receiver); | 5942 __ B(ge, ©_receiver); |
5939 Deoptimize(instr); | 5943 Deoptimize(instr, "not a JavaScript object"); |
5940 | 5944 |
5941 __ Bind(&global_object); | 5945 __ Bind(&global_object); |
5942 __ Ldr(result, FieldMemOperand(function, JSFunction::kContextOffset)); | 5946 __ Ldr(result, FieldMemOperand(function, JSFunction::kContextOffset)); |
5943 __ Ldr(result, ContextMemOperand(result, Context::GLOBAL_OBJECT_INDEX)); | 5947 __ Ldr(result, ContextMemOperand(result, Context::GLOBAL_OBJECT_INDEX)); |
5944 __ Ldr(result, FieldMemOperand(result, GlobalObject::kGlobalProxyOffset)); | 5948 __ Ldr(result, FieldMemOperand(result, GlobalObject::kGlobalProxyOffset)); |
5945 __ B(&done); | 5949 __ B(&done); |
5946 | 5950 |
5947 __ Bind(©_receiver); | 5951 __ Bind(©_receiver); |
5948 __ Mov(result, receiver); | 5952 __ Mov(result, receiver); |
5949 __ Bind(&done); | 5953 __ Bind(&done); |
(...skipping 85 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
6035 Handle<ScopeInfo> scope_info = instr->scope_info(); | 6039 Handle<ScopeInfo> scope_info = instr->scope_info(); |
6036 __ Push(scope_info); | 6040 __ Push(scope_info); |
6037 __ Push(ToRegister(instr->function())); | 6041 __ Push(ToRegister(instr->function())); |
6038 CallRuntime(Runtime::kPushBlockContext, 2, instr); | 6042 CallRuntime(Runtime::kPushBlockContext, 2, instr); |
6039 RecordSafepoint(Safepoint::kNoLazyDeopt); | 6043 RecordSafepoint(Safepoint::kNoLazyDeopt); |
6040 } | 6044 } |
6041 | 6045 |
6042 | 6046 |
6043 | 6047 |
6044 } } // namespace v8::internal | 6048 } } // namespace v8::internal |
OLD | NEW |