Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(64)

Side by Side Diff: src/crankshaft/arm64/lithium-codegen-arm64.cc

Issue 2161543002: [turbofan] Add support for eager/soft deoptimization reasons. (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Do the ports properly Created 4 years, 5 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « src/crankshaft/arm64/lithium-codegen-arm64.h ('k') | src/crankshaft/hydrogen.h » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright 2013 the V8 project authors. All rights reserved. 1 // Copyright 2013 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "src/crankshaft/arm64/lithium-codegen-arm64.h" 5 #include "src/crankshaft/arm64/lithium-codegen-arm64.h"
6 6
7 #include "src/arm64/frames-arm64.h" 7 #include "src/arm64/frames-arm64.h"
8 #include "src/base/bits.h" 8 #include "src/base/bits.h"
9 #include "src/code-factory.h" 9 #include "src/code-factory.h"
10 #include "src/code-stubs.h" 10 #include "src/code-stubs.h"
(...skipping 806 matching lines...) Expand 10 before | Expand all | Expand 10 after
817 void LCodeGen::FinishCode(Handle<Code> code) { 817 void LCodeGen::FinishCode(Handle<Code> code) {
818 DCHECK(is_done()); 818 DCHECK(is_done());
819 code->set_stack_slots(GetTotalFrameSlotCount()); 819 code->set_stack_slots(GetTotalFrameSlotCount());
820 code->set_safepoint_table_offset(safepoints_.GetCodeOffset()); 820 code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
821 Handle<ByteArray> source_positions = 821 Handle<ByteArray> source_positions =
822 source_position_table_builder_.ToSourcePositionTable(); 822 source_position_table_builder_.ToSourcePositionTable();
823 code->set_source_position_table(*source_positions); 823 code->set_source_position_table(*source_positions);
824 PopulateDeoptimizationData(code); 824 PopulateDeoptimizationData(code);
825 } 825 }
826 826
827
828 void LCodeGen::DeoptimizeBranch( 827 void LCodeGen::DeoptimizeBranch(
829 LInstruction* instr, Deoptimizer::DeoptReason deopt_reason, 828 LInstruction* instr, DeoptimizeReason deopt_reason, BranchType branch_type,
830 BranchType branch_type, Register reg, int bit, 829 Register reg, int bit, Deoptimizer::BailoutType* override_bailout_type) {
831 Deoptimizer::BailoutType* override_bailout_type) {
832 LEnvironment* environment = instr->environment(); 830 LEnvironment* environment = instr->environment();
833 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); 831 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
834 Deoptimizer::BailoutType bailout_type = 832 Deoptimizer::BailoutType bailout_type =
835 info()->IsStub() ? Deoptimizer::LAZY : Deoptimizer::EAGER; 833 info()->IsStub() ? Deoptimizer::LAZY : Deoptimizer::EAGER;
836 834
837 if (override_bailout_type != NULL) { 835 if (override_bailout_type != NULL) {
838 bailout_type = *override_bailout_type; 836 bailout_type = *override_bailout_type;
839 } 837 }
840 838
841 DCHECK(environment->HasBeenRegistered()); 839 DCHECK(environment->HasBeenRegistered());
(...skipping 51 matching lines...) Expand 10 before | Expand all | Expand 10 after
893 // jump entry if this is the case. 891 // jump entry if this is the case.
894 if (FLAG_trace_deopt || isolate()->is_profiling() || 892 if (FLAG_trace_deopt || isolate()->is_profiling() ||
895 jump_table_.is_empty() || 893 jump_table_.is_empty() ||
896 !table_entry->IsEquivalentTo(*jump_table_.last())) { 894 !table_entry->IsEquivalentTo(*jump_table_.last())) {
897 jump_table_.Add(table_entry, zone()); 895 jump_table_.Add(table_entry, zone());
898 } 896 }
899 __ B(&jump_table_.last()->label, branch_type, reg, bit); 897 __ B(&jump_table_.last()->label, branch_type, reg, bit);
900 } 898 }
901 } 899 }
902 900
903 901 void LCodeGen::Deoptimize(LInstruction* instr, DeoptimizeReason deopt_reason,
904 void LCodeGen::Deoptimize(LInstruction* instr,
905 Deoptimizer::DeoptReason deopt_reason,
906 Deoptimizer::BailoutType* override_bailout_type) { 902 Deoptimizer::BailoutType* override_bailout_type) {
907 DeoptimizeBranch(instr, deopt_reason, always, NoReg, -1, 903 DeoptimizeBranch(instr, deopt_reason, always, NoReg, -1,
908 override_bailout_type); 904 override_bailout_type);
909 } 905 }
910 906
911
912 void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr, 907 void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr,
913 Deoptimizer::DeoptReason deopt_reason) { 908 DeoptimizeReason deopt_reason) {
914 DeoptimizeBranch(instr, deopt_reason, static_cast<BranchType>(cond)); 909 DeoptimizeBranch(instr, deopt_reason, static_cast<BranchType>(cond));
915 } 910 }
916 911
917
918 void LCodeGen::DeoptimizeIfZero(Register rt, LInstruction* instr, 912 void LCodeGen::DeoptimizeIfZero(Register rt, LInstruction* instr,
919 Deoptimizer::DeoptReason deopt_reason) { 913 DeoptimizeReason deopt_reason) {
920 DeoptimizeBranch(instr, deopt_reason, reg_zero, rt); 914 DeoptimizeBranch(instr, deopt_reason, reg_zero, rt);
921 } 915 }
922 916
923
924 void LCodeGen::DeoptimizeIfNotZero(Register rt, LInstruction* instr, 917 void LCodeGen::DeoptimizeIfNotZero(Register rt, LInstruction* instr,
925 Deoptimizer::DeoptReason deopt_reason) { 918 DeoptimizeReason deopt_reason) {
926 DeoptimizeBranch(instr, deopt_reason, reg_not_zero, rt); 919 DeoptimizeBranch(instr, deopt_reason, reg_not_zero, rt);
927 } 920 }
928 921
929
930 void LCodeGen::DeoptimizeIfNegative(Register rt, LInstruction* instr, 922 void LCodeGen::DeoptimizeIfNegative(Register rt, LInstruction* instr,
931 Deoptimizer::DeoptReason deopt_reason) { 923 DeoptimizeReason deopt_reason) {
932 int sign_bit = rt.Is64Bits() ? kXSignBit : kWSignBit; 924 int sign_bit = rt.Is64Bits() ? kXSignBit : kWSignBit;
933 DeoptimizeIfBitSet(rt, sign_bit, instr, deopt_reason); 925 DeoptimizeIfBitSet(rt, sign_bit, instr, deopt_reason);
934 } 926 }
935 927
936
937 void LCodeGen::DeoptimizeIfSmi(Register rt, LInstruction* instr, 928 void LCodeGen::DeoptimizeIfSmi(Register rt, LInstruction* instr,
938 Deoptimizer::DeoptReason deopt_reason) { 929 DeoptimizeReason deopt_reason) {
939 DeoptimizeIfBitClear(rt, MaskToBit(kSmiTagMask), instr, deopt_reason); 930 DeoptimizeIfBitClear(rt, MaskToBit(kSmiTagMask), instr, deopt_reason);
940 } 931 }
941 932
942
943 void LCodeGen::DeoptimizeIfNotSmi(Register rt, LInstruction* instr, 933 void LCodeGen::DeoptimizeIfNotSmi(Register rt, LInstruction* instr,
944 Deoptimizer::DeoptReason deopt_reason) { 934 DeoptimizeReason deopt_reason) {
945 DeoptimizeIfBitSet(rt, MaskToBit(kSmiTagMask), instr, deopt_reason); 935 DeoptimizeIfBitSet(rt, MaskToBit(kSmiTagMask), instr, deopt_reason);
946 } 936 }
947 937
948
949 void LCodeGen::DeoptimizeIfRoot(Register rt, Heap::RootListIndex index, 938 void LCodeGen::DeoptimizeIfRoot(Register rt, Heap::RootListIndex index,
950 LInstruction* instr, 939 LInstruction* instr,
951 Deoptimizer::DeoptReason deopt_reason) { 940 DeoptimizeReason deopt_reason) {
952 __ CompareRoot(rt, index); 941 __ CompareRoot(rt, index);
953 DeoptimizeIf(eq, instr, deopt_reason); 942 DeoptimizeIf(eq, instr, deopt_reason);
954 } 943 }
955 944
956
957 void LCodeGen::DeoptimizeIfNotRoot(Register rt, Heap::RootListIndex index, 945 void LCodeGen::DeoptimizeIfNotRoot(Register rt, Heap::RootListIndex index,
958 LInstruction* instr, 946 LInstruction* instr,
959 Deoptimizer::DeoptReason deopt_reason) { 947 DeoptimizeReason deopt_reason) {
960 __ CompareRoot(rt, index); 948 __ CompareRoot(rt, index);
961 DeoptimizeIf(ne, instr, deopt_reason); 949 DeoptimizeIf(ne, instr, deopt_reason);
962 } 950 }
963 951
964
965 void LCodeGen::DeoptimizeIfMinusZero(DoubleRegister input, LInstruction* instr, 952 void LCodeGen::DeoptimizeIfMinusZero(DoubleRegister input, LInstruction* instr,
966 Deoptimizer::DeoptReason deopt_reason) { 953 DeoptimizeReason deopt_reason) {
967 __ TestForMinusZero(input); 954 __ TestForMinusZero(input);
968 DeoptimizeIf(vs, instr, deopt_reason); 955 DeoptimizeIf(vs, instr, deopt_reason);
969 } 956 }
970 957
971 958
972 void LCodeGen::DeoptimizeIfNotHeapNumber(Register object, LInstruction* instr) { 959 void LCodeGen::DeoptimizeIfNotHeapNumber(Register object, LInstruction* instr) {
973 __ CompareObjectMap(object, Heap::kHeapNumberMapRootIndex); 960 __ CompareObjectMap(object, Heap::kHeapNumberMapRootIndex);
974 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber); 961 DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber);
975 } 962 }
976 963
977
978 void LCodeGen::DeoptimizeIfBitSet(Register rt, int bit, LInstruction* instr, 964 void LCodeGen::DeoptimizeIfBitSet(Register rt, int bit, LInstruction* instr,
979 Deoptimizer::DeoptReason deopt_reason) { 965 DeoptimizeReason deopt_reason) {
980 DeoptimizeBranch(instr, deopt_reason, reg_bit_set, rt, bit); 966 DeoptimizeBranch(instr, deopt_reason, reg_bit_set, rt, bit);
981 } 967 }
982 968
983
984 void LCodeGen::DeoptimizeIfBitClear(Register rt, int bit, LInstruction* instr, 969 void LCodeGen::DeoptimizeIfBitClear(Register rt, int bit, LInstruction* instr,
985 Deoptimizer::DeoptReason deopt_reason) { 970 DeoptimizeReason deopt_reason) {
986 DeoptimizeBranch(instr, deopt_reason, reg_bit_clear, rt, bit); 971 DeoptimizeBranch(instr, deopt_reason, reg_bit_clear, rt, bit);
987 } 972 }
988 973
989 974
990 void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) { 975 void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
991 if (info()->ShouldEnsureSpaceForLazyDeopt()) { 976 if (info()->ShouldEnsureSpaceForLazyDeopt()) {
992 // Ensure that we have enough space after the previous lazy-bailout 977 // Ensure that we have enough space after the previous lazy-bailout
993 // instruction for patching the code here. 978 // instruction for patching the code here.
994 intptr_t current_pc = masm()->pc_offset(); 979 intptr_t current_pc = masm()->pc_offset();
995 980
(...skipping 358 matching lines...) Expand 10 before | Expand all | Expand 10 after
1354 1339
1355 1340
1356 void LCodeGen::DoAddI(LAddI* instr) { 1341 void LCodeGen::DoAddI(LAddI* instr) {
1357 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); 1342 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1358 Register result = ToRegister32(instr->result()); 1343 Register result = ToRegister32(instr->result());
1359 Register left = ToRegister32(instr->left()); 1344 Register left = ToRegister32(instr->left());
1360 Operand right = ToShiftedRightOperand32(instr->right(), instr); 1345 Operand right = ToShiftedRightOperand32(instr->right(), instr);
1361 1346
1362 if (can_overflow) { 1347 if (can_overflow) {
1363 __ Adds(result, left, right); 1348 __ Adds(result, left, right);
1364 DeoptimizeIf(vs, instr, Deoptimizer::kOverflow); 1349 DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow);
1365 } else { 1350 } else {
1366 __ Add(result, left, right); 1351 __ Add(result, left, right);
1367 } 1352 }
1368 } 1353 }
1369 1354
1370 1355
1371 void LCodeGen::DoAddS(LAddS* instr) { 1356 void LCodeGen::DoAddS(LAddS* instr) {
1372 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); 1357 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1373 Register result = ToRegister(instr->result()); 1358 Register result = ToRegister(instr->result());
1374 Register left = ToRegister(instr->left()); 1359 Register left = ToRegister(instr->left());
1375 Operand right = ToOperand(instr->right()); 1360 Operand right = ToOperand(instr->right());
1376 if (can_overflow) { 1361 if (can_overflow) {
1377 __ Adds(result, left, right); 1362 __ Adds(result, left, right);
1378 DeoptimizeIf(vs, instr, Deoptimizer::kOverflow); 1363 DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow);
1379 } else { 1364 } else {
1380 __ Add(result, left, right); 1365 __ Add(result, left, right);
1381 } 1366 }
1382 } 1367 }
1383 1368
1384 1369
1385 void LCodeGen::DoAllocate(LAllocate* instr) { 1370 void LCodeGen::DoAllocate(LAllocate* instr) {
1386 class DeferredAllocate: public LDeferredCode { 1371 class DeferredAllocate: public LDeferredCode {
1387 public: 1372 public:
1388 DeferredAllocate(LCodeGen* codegen, LAllocate* instr) 1373 DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
(...skipping 142 matching lines...) Expand 10 before | Expand all | Expand 10 after
1531 Register scratch = x5; 1516 Register scratch = x5;
1532 DCHECK(receiver.Is(x0)); // Used for parameter count. 1517 DCHECK(receiver.Is(x0)); // Used for parameter count.
1533 DCHECK(function.Is(x1)); // Required by InvokeFunction. 1518 DCHECK(function.Is(x1)); // Required by InvokeFunction.
1534 DCHECK(ToRegister(instr->result()).Is(x0)); 1519 DCHECK(ToRegister(instr->result()).Is(x0));
1535 DCHECK(instr->IsMarkedAsCall()); 1520 DCHECK(instr->IsMarkedAsCall());
1536 1521
1537 // Copy the arguments to this function possibly from the 1522 // Copy the arguments to this function possibly from the
1538 // adaptor frame below it. 1523 // adaptor frame below it.
1539 const uint32_t kArgumentsLimit = 1 * KB; 1524 const uint32_t kArgumentsLimit = 1 * KB;
1540 __ Cmp(length, kArgumentsLimit); 1525 __ Cmp(length, kArgumentsLimit);
1541 DeoptimizeIf(hi, instr, Deoptimizer::kTooManyArguments); 1526 DeoptimizeIf(hi, instr, DeoptimizeReason::kTooManyArguments);
1542 1527
1543 // Push the receiver and use the register to keep the original 1528 // Push the receiver and use the register to keep the original
1544 // number of arguments. 1529 // number of arguments.
1545 __ Push(receiver); 1530 __ Push(receiver);
1546 Register argc = receiver; 1531 Register argc = receiver;
1547 receiver = NoReg; 1532 receiver = NoReg;
1548 __ Sxtw(argc, length); 1533 __ Sxtw(argc, length);
1549 // The arguments are at a one pointer size offset from elements. 1534 // The arguments are at a one pointer size offset from elements.
1550 __ Add(elements, elements, 1 * kPointerSize); 1535 __ Add(elements, elements, 1 * kPointerSize);
1551 1536
(...skipping 171 matching lines...) Expand 10 before | Expand all | Expand 10 after
1723 __ Cmp(length, index); 1708 __ Cmp(length, index);
1724 cond = CommuteCondition(cond); 1709 cond = CommuteCondition(cond);
1725 } else { 1710 } else {
1726 Register index = ToRegister32(instr->index()); 1711 Register index = ToRegister32(instr->index());
1727 Operand length = ToOperand32(instr->length()); 1712 Operand length = ToOperand32(instr->length());
1728 __ Cmp(index, length); 1713 __ Cmp(index, length);
1729 } 1714 }
1730 if (FLAG_debug_code && instr->hydrogen()->skip_check()) { 1715 if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
1731 __ Assert(NegateCondition(cond), kEliminatedBoundsCheckFailed); 1716 __ Assert(NegateCondition(cond), kEliminatedBoundsCheckFailed);
1732 } else { 1717 } else {
1733 DeoptimizeIf(cond, instr, Deoptimizer::kOutOfBounds); 1718 DeoptimizeIf(cond, instr, DeoptimizeReason::kOutOfBounds);
1734 } 1719 }
1735 } 1720 }
1736 1721
1737 1722
1738 void LCodeGen::DoBranch(LBranch* instr) { 1723 void LCodeGen::DoBranch(LBranch* instr) {
1739 Representation r = instr->hydrogen()->value()->representation(); 1724 Representation r = instr->hydrogen()->value()->representation();
1740 Label* true_label = instr->TrueLabel(chunk_); 1725 Label* true_label = instr->TrueLabel(chunk_);
1741 Label* false_label = instr->FalseLabel(chunk_); 1726 Label* false_label = instr->FalseLabel(chunk_);
1742 1727
1743 if (r.IsInteger32()) { 1728 if (r.IsInteger32()) {
(...skipping 59 matching lines...) Expand 10 before | Expand all | Expand 10 after
1803 value, Heap::kNullValueRootIndex, false_label); 1788 value, Heap::kNullValueRootIndex, false_label);
1804 } 1789 }
1805 1790
1806 if (expected.Contains(ToBooleanICStub::SMI)) { 1791 if (expected.Contains(ToBooleanICStub::SMI)) {
1807 // Smis: 0 -> false, all other -> true. 1792 // Smis: 0 -> false, all other -> true.
1808 DCHECK(Smi::FromInt(0) == 0); 1793 DCHECK(Smi::FromInt(0) == 0);
1809 __ Cbz(value, false_label); 1794 __ Cbz(value, false_label);
1810 __ JumpIfSmi(value, true_label); 1795 __ JumpIfSmi(value, true_label);
1811 } else if (expected.NeedsMap()) { 1796 } else if (expected.NeedsMap()) {
1812 // If we need a map later and have a smi, deopt. 1797 // If we need a map later and have a smi, deopt.
1813 DeoptimizeIfSmi(value, instr, Deoptimizer::kSmi); 1798 DeoptimizeIfSmi(value, instr, DeoptimizeReason::kSmi);
1814 } 1799 }
1815 1800
1816 Register map = NoReg; 1801 Register map = NoReg;
1817 Register scratch = NoReg; 1802 Register scratch = NoReg;
1818 1803
1819 if (expected.NeedsMap()) { 1804 if (expected.NeedsMap()) {
1820 DCHECK((instr->temp1() != NULL) && (instr->temp2() != NULL)); 1805 DCHECK((instr->temp1() != NULL) && (instr->temp2() != NULL));
1821 map = ToRegister(instr->temp1()); 1806 map = ToRegister(instr->temp1());
1822 scratch = ToRegister(instr->temp2()); 1807 scratch = ToRegister(instr->temp2());
1823 1808
(...skipping 46 matching lines...) Expand 10 before | Expand all | Expand 10 after
1870 // If we got a NaN (overflow bit is set), jump to the false branch. 1855 // If we got a NaN (overflow bit is set), jump to the false branch.
1871 __ B(vs, false_label); 1856 __ B(vs, false_label);
1872 __ B(eq, false_label); 1857 __ B(eq, false_label);
1873 __ B(true_label); 1858 __ B(true_label);
1874 __ Bind(&not_heap_number); 1859 __ Bind(&not_heap_number);
1875 } 1860 }
1876 1861
1877 if (!expected.IsGeneric()) { 1862 if (!expected.IsGeneric()) {
1878 // We've seen something for the first time -> deopt. 1863 // We've seen something for the first time -> deopt.
1879 // This can only happen if we are not generic already. 1864 // This can only happen if we are not generic already.
1880 Deoptimize(instr, Deoptimizer::kUnexpectedObject); 1865 Deoptimize(instr, DeoptimizeReason::kUnexpectedObject);
1881 } 1866 }
1882 } 1867 }
1883 } 1868 }
1884 } 1869 }
1885 1870
1886 void LCodeGen::CallKnownFunction(Handle<JSFunction> function, 1871 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
1887 int formal_parameter_count, int arity, 1872 int formal_parameter_count, int arity,
1888 bool is_tail_call, LInstruction* instr) { 1873 bool is_tail_call, LInstruction* instr) {
1889 bool dont_adapt_arguments = 1874 bool dont_adapt_arguments =
1890 formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel; 1875 formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
(...skipping 115 matching lines...) Expand 10 before | Expand all | Expand 10 after
2006 Register temp = ToRegister(instr->temp()); 1991 Register temp = ToRegister(instr->temp());
2007 { 1992 {
2008 PushSafepointRegistersScope scope(this); 1993 PushSafepointRegistersScope scope(this);
2009 __ Push(object); 1994 __ Push(object);
2010 __ Mov(cp, 0); 1995 __ Mov(cp, 0);
2011 __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance); 1996 __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
2012 RecordSafepointWithRegisters( 1997 RecordSafepointWithRegisters(
2013 instr->pointer_map(), 1, Safepoint::kNoLazyDeopt); 1998 instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
2014 __ StoreToSafepointRegisterSlot(x0, temp); 1999 __ StoreToSafepointRegisterSlot(x0, temp);
2015 } 2000 }
2016 DeoptimizeIfSmi(temp, instr, Deoptimizer::kInstanceMigrationFailed); 2001 DeoptimizeIfSmi(temp, instr, DeoptimizeReason::kInstanceMigrationFailed);
2017 } 2002 }
2018 2003
2019 2004
2020 void LCodeGen::DoCheckMaps(LCheckMaps* instr) { 2005 void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
2021 class DeferredCheckMaps: public LDeferredCode { 2006 class DeferredCheckMaps: public LDeferredCode {
2022 public: 2007 public:
2023 DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object) 2008 DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
2024 : LDeferredCode(codegen), instr_(instr), object_(object) { 2009 : LDeferredCode(codegen), instr_(instr), object_(object) {
2025 SetExit(check_maps()); 2010 SetExit(check_maps());
2026 } 2011 }
(...skipping 34 matching lines...) Expand 10 before | Expand all | Expand 10 after
2061 __ CompareMap(map_reg, map); 2046 __ CompareMap(map_reg, map);
2062 __ B(eq, &success); 2047 __ B(eq, &success);
2063 } 2048 }
2064 Handle<Map> map = maps->at(maps->size() - 1).handle(); 2049 Handle<Map> map = maps->at(maps->size() - 1).handle();
2065 __ CompareMap(map_reg, map); 2050 __ CompareMap(map_reg, map);
2066 2051
2067 // We didn't match a map. 2052 // We didn't match a map.
2068 if (instr->hydrogen()->HasMigrationTarget()) { 2053 if (instr->hydrogen()->HasMigrationTarget()) {
2069 __ B(ne, deferred->entry()); 2054 __ B(ne, deferred->entry());
2070 } else { 2055 } else {
2071 DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap); 2056 DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongMap);
2072 } 2057 }
2073 2058
2074 __ Bind(&success); 2059 __ Bind(&success);
2075 } 2060 }
2076 2061
2077 2062
2078 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) { 2063 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
2079 if (!instr->hydrogen()->value()->type().IsHeapObject()) { 2064 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
2080 DeoptimizeIfSmi(ToRegister(instr->value()), instr, Deoptimizer::kSmi); 2065 DeoptimizeIfSmi(ToRegister(instr->value()), instr, DeoptimizeReason::kSmi);
2081 } 2066 }
2082 } 2067 }
2083 2068
2084 2069
2085 void LCodeGen::DoCheckSmi(LCheckSmi* instr) { 2070 void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
2086 Register value = ToRegister(instr->value()); 2071 Register value = ToRegister(instr->value());
2087 DCHECK(!instr->result() || ToRegister(instr->result()).Is(value)); 2072 DCHECK(!instr->result() || ToRegister(instr->result()).Is(value));
2088 DeoptimizeIfNotSmi(value, instr, Deoptimizer::kNotASmi); 2073 DeoptimizeIfNotSmi(value, instr, DeoptimizeReason::kNotASmi);
2089 } 2074 }
2090 2075
2091 2076
2092 void LCodeGen::DoCheckArrayBufferNotNeutered( 2077 void LCodeGen::DoCheckArrayBufferNotNeutered(
2093 LCheckArrayBufferNotNeutered* instr) { 2078 LCheckArrayBufferNotNeutered* instr) {
2094 UseScratchRegisterScope temps(masm()); 2079 UseScratchRegisterScope temps(masm());
2095 Register view = ToRegister(instr->view()); 2080 Register view = ToRegister(instr->view());
2096 Register scratch = temps.AcquireX(); 2081 Register scratch = temps.AcquireX();
2097 2082
2098 __ Ldr(scratch, FieldMemOperand(view, JSArrayBufferView::kBufferOffset)); 2083 __ Ldr(scratch, FieldMemOperand(view, JSArrayBufferView::kBufferOffset));
2099 __ Ldr(scratch, FieldMemOperand(scratch, JSArrayBuffer::kBitFieldOffset)); 2084 __ Ldr(scratch, FieldMemOperand(scratch, JSArrayBuffer::kBitFieldOffset));
2100 __ Tst(scratch, Operand(1 << JSArrayBuffer::WasNeutered::kShift)); 2085 __ Tst(scratch, Operand(1 << JSArrayBuffer::WasNeutered::kShift));
2101 DeoptimizeIf(ne, instr, Deoptimizer::kOutOfBounds); 2086 DeoptimizeIf(ne, instr, DeoptimizeReason::kOutOfBounds);
2102 } 2087 }
2103 2088
2104 2089
2105 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) { 2090 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
2106 Register input = ToRegister(instr->value()); 2091 Register input = ToRegister(instr->value());
2107 Register scratch = ToRegister(instr->temp()); 2092 Register scratch = ToRegister(instr->temp());
2108 2093
2109 __ Ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); 2094 __ Ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
2110 __ Ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); 2095 __ Ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
2111 2096
2112 if (instr->hydrogen()->is_interval_check()) { 2097 if (instr->hydrogen()->is_interval_check()) {
2113 InstanceType first, last; 2098 InstanceType first, last;
2114 instr->hydrogen()->GetCheckInterval(&first, &last); 2099 instr->hydrogen()->GetCheckInterval(&first, &last);
2115 2100
2116 __ Cmp(scratch, first); 2101 __ Cmp(scratch, first);
2117 if (first == last) { 2102 if (first == last) {
2118 // If there is only one type in the interval check for equality. 2103 // If there is only one type in the interval check for equality.
2119 DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType); 2104 DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongInstanceType);
2120 } else if (last == LAST_TYPE) { 2105 } else if (last == LAST_TYPE) {
2121 // We don't need to compare with the higher bound of the interval. 2106 // We don't need to compare with the higher bound of the interval.
2122 DeoptimizeIf(lo, instr, Deoptimizer::kWrongInstanceType); 2107 DeoptimizeIf(lo, instr, DeoptimizeReason::kWrongInstanceType);
2123 } else { 2108 } else {
2124 // If we are below the lower bound, set the C flag and clear the Z flag 2109 // If we are below the lower bound, set the C flag and clear the Z flag
2125 // to force a deopt. 2110 // to force a deopt.
2126 __ Ccmp(scratch, last, CFlag, hs); 2111 __ Ccmp(scratch, last, CFlag, hs);
2127 DeoptimizeIf(hi, instr, Deoptimizer::kWrongInstanceType); 2112 DeoptimizeIf(hi, instr, DeoptimizeReason::kWrongInstanceType);
2128 } 2113 }
2129 } else { 2114 } else {
2130 uint8_t mask; 2115 uint8_t mask;
2131 uint8_t tag; 2116 uint8_t tag;
2132 instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag); 2117 instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
2133 2118
2134 if (base::bits::IsPowerOfTwo32(mask)) { 2119 if (base::bits::IsPowerOfTwo32(mask)) {
2135 DCHECK((tag == 0) || (tag == mask)); 2120 DCHECK((tag == 0) || (tag == mask));
2136 if (tag == 0) { 2121 if (tag == 0) {
2137 DeoptimizeIfBitSet(scratch, MaskToBit(mask), instr, 2122 DeoptimizeIfBitSet(scratch, MaskToBit(mask), instr,
2138 Deoptimizer::kWrongInstanceType); 2123 DeoptimizeReason::kWrongInstanceType);
2139 } else { 2124 } else {
2140 DeoptimizeIfBitClear(scratch, MaskToBit(mask), instr, 2125 DeoptimizeIfBitClear(scratch, MaskToBit(mask), instr,
2141 Deoptimizer::kWrongInstanceType); 2126 DeoptimizeReason::kWrongInstanceType);
2142 } 2127 }
2143 } else { 2128 } else {
2144 if (tag == 0) { 2129 if (tag == 0) {
2145 __ Tst(scratch, mask); 2130 __ Tst(scratch, mask);
2146 } else { 2131 } else {
2147 __ And(scratch, scratch, mask); 2132 __ And(scratch, scratch, mask);
2148 __ Cmp(scratch, tag); 2133 __ Cmp(scratch, tag);
2149 } 2134 }
2150 DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType); 2135 DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongInstanceType);
2151 } 2136 }
2152 } 2137 }
2153 } 2138 }
2154 2139
2155 2140
2156 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { 2141 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
2157 DoubleRegister input = ToDoubleRegister(instr->unclamped()); 2142 DoubleRegister input = ToDoubleRegister(instr->unclamped());
2158 Register result = ToRegister32(instr->result()); 2143 Register result = ToRegister32(instr->result());
2159 __ ClampDoubleToUint8(result, input, double_scratch()); 2144 __ ClampDoubleToUint8(result, input, double_scratch());
2160 } 2145 }
(...skipping 19 matching lines...) Expand all
2180 __ B(&done); 2165 __ B(&done);
2181 2166
2182 __ Bind(&is_not_smi); 2167 __ Bind(&is_not_smi);
2183 2168
2184 // Check for heap number. 2169 // Check for heap number.
2185 Label is_heap_number; 2170 Label is_heap_number;
2186 __ JumpIfHeapNumber(input, &is_heap_number); 2171 __ JumpIfHeapNumber(input, &is_heap_number);
2187 2172
 2188 // Check for undefined. Undefined is converted to zero for clamping conversion. 2173 // Check for undefined. Undefined is converted to zero for clamping conversion.
2189 DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr, 2174 DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr,
2190 Deoptimizer::kNotAHeapNumberUndefined); 2175 DeoptimizeReason::kNotAHeapNumberUndefined);
2191 __ Mov(result, 0); 2176 __ Mov(result, 0);
2192 __ B(&done); 2177 __ B(&done);
2193 2178
2194 // Heap number case. 2179 // Heap number case.
2195 __ Bind(&is_heap_number); 2180 __ Bind(&is_heap_number);
2196 DoubleRegister dbl_scratch = double_scratch(); 2181 DoubleRegister dbl_scratch = double_scratch();
2197 DoubleRegister dbl_scratch2 = ToDoubleRegister(instr->temp1()); 2182 DoubleRegister dbl_scratch2 = ToDoubleRegister(instr->temp1());
2198 __ Ldr(dbl_scratch, FieldMemOperand(input, HeapNumber::kValueOffset)); 2183 __ Ldr(dbl_scratch, FieldMemOperand(input, HeapNumber::kValueOffset));
2199 __ ClampDoubleToUint8(result, dbl_scratch, dbl_scratch2); 2184 __ ClampDoubleToUint8(result, dbl_scratch, dbl_scratch2);
2200 2185
(...skipping 232 matching lines...) Expand 10 before | Expand all | Expand 10 after
2433 if (isolate()->heap()->InNewSpace(*object)) { 2418 if (isolate()->heap()->InNewSpace(*object)) {
2434 UseScratchRegisterScope temps(masm()); 2419 UseScratchRegisterScope temps(masm());
2435 Register temp = temps.AcquireX(); 2420 Register temp = temps.AcquireX();
2436 Handle<Cell> cell = isolate()->factory()->NewCell(object); 2421 Handle<Cell> cell = isolate()->factory()->NewCell(object);
2437 __ Mov(temp, Operand(cell)); 2422 __ Mov(temp, Operand(cell));
2438 __ Ldr(temp, FieldMemOperand(temp, Cell::kValueOffset)); 2423 __ Ldr(temp, FieldMemOperand(temp, Cell::kValueOffset));
2439 __ Cmp(reg, temp); 2424 __ Cmp(reg, temp);
2440 } else { 2425 } else {
2441 __ Cmp(reg, Operand(object)); 2426 __ Cmp(reg, Operand(object));
2442 } 2427 }
2443 DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch); 2428 DeoptimizeIf(ne, instr, DeoptimizeReason::kValueMismatch);
2444 } 2429 }
2445 2430
2446 2431
2447 void LCodeGen::DoLazyBailout(LLazyBailout* instr) { 2432 void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
2448 last_lazy_deopt_pc_ = masm()->pc_offset(); 2433 last_lazy_deopt_pc_ = masm()->pc_offset();
2449 DCHECK(instr->HasEnvironment()); 2434 DCHECK(instr->HasEnvironment());
2450 LEnvironment* env = instr->environment(); 2435 LEnvironment* env = instr->environment();
2451 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt); 2436 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
2452 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index()); 2437 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
2453 } 2438 }
(...skipping 16 matching lines...) Expand all
2470 void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) { 2455 void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
2471 Register dividend = ToRegister32(instr->dividend()); 2456 Register dividend = ToRegister32(instr->dividend());
2472 int32_t divisor = instr->divisor(); 2457 int32_t divisor = instr->divisor();
2473 Register result = ToRegister32(instr->result()); 2458 Register result = ToRegister32(instr->result());
2474 DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor))); 2459 DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
2475 DCHECK(!result.is(dividend)); 2460 DCHECK(!result.is(dividend));
2476 2461
2477 // Check for (0 / -x) that will produce negative zero. 2462 // Check for (0 / -x) that will produce negative zero.
2478 HDiv* hdiv = instr->hydrogen(); 2463 HDiv* hdiv = instr->hydrogen();
2479 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { 2464 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
2480 DeoptimizeIfZero(dividend, instr, Deoptimizer::kDivisionByZero); 2465 DeoptimizeIfZero(dividend, instr, DeoptimizeReason::kDivisionByZero);
2481 } 2466 }
2482 // Check for (kMinInt / -1). 2467 // Check for (kMinInt / -1).
2483 if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) { 2468 if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
2484 // Test dividend for kMinInt by subtracting one (cmp) and checking for 2469 // Test dividend for kMinInt by subtracting one (cmp) and checking for
2485 // overflow. 2470 // overflow.
2486 __ Cmp(dividend, 1); 2471 __ Cmp(dividend, 1);
2487 DeoptimizeIf(vs, instr, Deoptimizer::kOverflow); 2472 DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow);
2488 } 2473 }
2489 // Deoptimize if remainder will not be 0. 2474 // Deoptimize if remainder will not be 0.
2490 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && 2475 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
2491 divisor != 1 && divisor != -1) { 2476 divisor != 1 && divisor != -1) {
2492 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1); 2477 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
2493 __ Tst(dividend, mask); 2478 __ Tst(dividend, mask);
2494 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision); 2479 DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision);
2495 } 2480 }
2496 2481
2497 if (divisor == -1) { // Nice shortcut, not needed for correctness. 2482 if (divisor == -1) { // Nice shortcut, not needed for correctness.
2498 __ Neg(result, dividend); 2483 __ Neg(result, dividend);
2499 return; 2484 return;
2500 } 2485 }
2501 int32_t shift = WhichPowerOf2Abs(divisor); 2486 int32_t shift = WhichPowerOf2Abs(divisor);
2502 if (shift == 0) { 2487 if (shift == 0) {
2503 __ Mov(result, dividend); 2488 __ Mov(result, dividend);
2504 } else if (shift == 1) { 2489 } else if (shift == 1) {
2505 __ Add(result, dividend, Operand(dividend, LSR, 31)); 2490 __ Add(result, dividend, Operand(dividend, LSR, 31));
2506 } else { 2491 } else {
2507 __ Mov(result, Operand(dividend, ASR, 31)); 2492 __ Mov(result, Operand(dividend, ASR, 31));
2508 __ Add(result, dividend, Operand(result, LSR, 32 - shift)); 2493 __ Add(result, dividend, Operand(result, LSR, 32 - shift));
2509 } 2494 }
2510 if (shift > 0) __ Mov(result, Operand(result, ASR, shift)); 2495 if (shift > 0) __ Mov(result, Operand(result, ASR, shift));
2511 if (divisor < 0) __ Neg(result, result); 2496 if (divisor < 0) __ Neg(result, result);
2512 } 2497 }
2513 2498
2514 2499
2515 void LCodeGen::DoDivByConstI(LDivByConstI* instr) { 2500 void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
2516 Register dividend = ToRegister32(instr->dividend()); 2501 Register dividend = ToRegister32(instr->dividend());
2517 int32_t divisor = instr->divisor(); 2502 int32_t divisor = instr->divisor();
2518 Register result = ToRegister32(instr->result()); 2503 Register result = ToRegister32(instr->result());
2519 DCHECK(!AreAliased(dividend, result)); 2504 DCHECK(!AreAliased(dividend, result));
2520 2505
2521 if (divisor == 0) { 2506 if (divisor == 0) {
2522 Deoptimize(instr, Deoptimizer::kDivisionByZero); 2507 Deoptimize(instr, DeoptimizeReason::kDivisionByZero);
2523 return; 2508 return;
2524 } 2509 }
2525 2510
2526 // Check for (0 / -x) that will produce negative zero. 2511 // Check for (0 / -x) that will produce negative zero.
2527 HDiv* hdiv = instr->hydrogen(); 2512 HDiv* hdiv = instr->hydrogen();
2528 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { 2513 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
2529 DeoptimizeIfZero(dividend, instr, Deoptimizer::kMinusZero); 2514 DeoptimizeIfZero(dividend, instr, DeoptimizeReason::kMinusZero);
2530 } 2515 }
2531 2516
2532 __ TruncatingDiv(result, dividend, Abs(divisor)); 2517 __ TruncatingDiv(result, dividend, Abs(divisor));
2533 if (divisor < 0) __ Neg(result, result); 2518 if (divisor < 0) __ Neg(result, result);
2534 2519
2535 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) { 2520 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
2536 Register temp = ToRegister32(instr->temp()); 2521 Register temp = ToRegister32(instr->temp());
2537 DCHECK(!AreAliased(dividend, result, temp)); 2522 DCHECK(!AreAliased(dividend, result, temp));
2538 __ Sxtw(dividend.X(), dividend); 2523 __ Sxtw(dividend.X(), dividend);
2539 __ Mov(temp, divisor); 2524 __ Mov(temp, divisor);
2540 __ Smsubl(temp.X(), result, temp, dividend.X()); 2525 __ Smsubl(temp.X(), result, temp, dividend.X());
2541 DeoptimizeIfNotZero(temp, instr, Deoptimizer::kLostPrecision); 2526 DeoptimizeIfNotZero(temp, instr, DeoptimizeReason::kLostPrecision);
2542 } 2527 }
2543 } 2528 }
2544 2529
2545 2530
// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
// Truncating int32 division with a register divisor. Emits Sdiv immediately
// and overlaps the deopt checks with the division's latency; the Cmp/Ccmp
// sequences below are strictly order-dependent (each Ccmp consumes the flags
// set by the preceding Cmp).
void LCodeGen::DoDivI(LDivI* instr) {
  HBinaryOperation* hdiv = instr->hydrogen();
  Register dividend = ToRegister32(instr->dividend());
  Register divisor = ToRegister32(instr->divisor());
  Register result = ToRegister32(instr->result());

  // Issue the division first, and then check for any deopt cases whilst the
  // result is computed.
  __ Sdiv(result, dividend, divisor);

  // If all uses truncate, Sdiv's wrapping semantics (x/0 == 0,
  // kMinInt/-1 == kMinInt) are exactly what is wanted: no checks needed.
  if (hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
    DCHECK(!instr->temp());
    return;
  }

  // Check for x / 0.
  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
    DeoptimizeIfZero(divisor, instr, DeoptimizeReason::kDivisionByZero);
  }

  // Check for (0 / -x) as that will produce negative zero.
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
    __ Cmp(divisor, 0);

    // If the divisor < 0 (mi), compare the dividend, and deopt if it is
    // zero, ie. zero dividend with negative divisor deopts.
    // If the divisor >= 0 (pl, the opposite of mi) set the flags to
    // condition ne, so we don't deopt, ie. positive divisor doesn't deopt.
    __ Ccmp(dividend, 0, NoFlag, mi);
    DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
  }

  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow)) {
    // Test dividend for kMinInt by subtracting one (cmp) and checking for
    // overflow.
    __ Cmp(dividend, 1);
    // If overflow is set, ie. dividend = kMinInt, compare the divisor with
    // -1. If overflow is clear, set the flags for condition ne, as the
    // dividend isn't -1, and thus we shouldn't deopt.
    __ Ccmp(divisor, -1, NoFlag, vs);
    DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow);
  }

  // Compute remainder and deopt if it's not zero.
  Register remainder = ToRegister32(instr->temp());
  __ Msub(remainder, result, divisor, dividend);
  DeoptimizeIfNotZero(remainder, instr, DeoptimizeReason::kLostPrecision);
}
2596 2581
2597 2582
2598 void LCodeGen::DoDoubleToIntOrSmi(LDoubleToIntOrSmi* instr) { 2583 void LCodeGen::DoDoubleToIntOrSmi(LDoubleToIntOrSmi* instr) {
2599 DoubleRegister input = ToDoubleRegister(instr->value()); 2584 DoubleRegister input = ToDoubleRegister(instr->value());
2600 Register result = ToRegister32(instr->result()); 2585 Register result = ToRegister32(instr->result());
2601 2586
2602 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 2587 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
2603 DeoptimizeIfMinusZero(input, instr, Deoptimizer::kMinusZero); 2588 DeoptimizeIfMinusZero(input, instr, DeoptimizeReason::kMinusZero);
2604 } 2589 }
2605 2590
2606 __ TryRepresentDoubleAsInt32(result, input, double_scratch()); 2591 __ TryRepresentDoubleAsInt32(result, input, double_scratch());
2607 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN); 2592 DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN);
2608 2593
2609 if (instr->tag_result()) { 2594 if (instr->tag_result()) {
2610 __ SmiTag(result.X()); 2595 __ SmiTag(result.X());
2611 } 2596 }
2612 } 2597 }
2613 2598
2614 2599
2615 void LCodeGen::DoDrop(LDrop* instr) { 2600 void LCodeGen::DoDrop(LDrop* instr) {
2616 __ Drop(instr->count()); 2601 __ Drop(instr->count());
2617 2602
(...skipping 19 matching lines...) Expand all
2637 __ EnumLengthUntagged(result, map); 2622 __ EnumLengthUntagged(result, map);
2638 __ Cbnz(result, &load_cache); 2623 __ Cbnz(result, &load_cache);
2639 2624
2640 __ Mov(result, Operand(isolate()->factory()->empty_fixed_array())); 2625 __ Mov(result, Operand(isolate()->factory()->empty_fixed_array()));
2641 __ B(&done); 2626 __ B(&done);
2642 2627
2643 __ Bind(&load_cache); 2628 __ Bind(&load_cache);
2644 __ LoadInstanceDescriptors(map, result); 2629 __ LoadInstanceDescriptors(map, result);
2645 __ Ldr(result, FieldMemOperand(result, DescriptorArray::kEnumCacheOffset)); 2630 __ Ldr(result, FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
2646 __ Ldr(result, FieldMemOperand(result, FixedArray::SizeFor(instr->idx()))); 2631 __ Ldr(result, FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
2647 DeoptimizeIfZero(result, instr, Deoptimizer::kNoCache); 2632 DeoptimizeIfZero(result, instr, DeoptimizeReason::kNoCache);
2648 2633
2649 __ Bind(&done); 2634 __ Bind(&done);
2650 } 2635 }
2651 2636
2652 2637
2653 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) { 2638 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
2654 Register object = ToRegister(instr->object()); 2639 Register object = ToRegister(instr->object());
2655 2640
2656 DCHECK(instr->IsMarkedAsCall()); 2641 DCHECK(instr->IsMarkedAsCall());
2657 DCHECK(object.Is(x0)); 2642 DCHECK(object.Is(x0));
(...skipping 125 matching lines...) Expand 10 before | Expand all | Expand 10 after
2783 2768
2784 // Loop through the {object}s prototype chain looking for the {prototype}. 2769 // Loop through the {object}s prototype chain looking for the {prototype}.
2785 __ Ldr(object_map, FieldMemOperand(object, HeapObject::kMapOffset)); 2770 __ Ldr(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
2786 Label loop; 2771 Label loop;
2787 __ Bind(&loop); 2772 __ Bind(&loop);
2788 2773
2789 // Deoptimize if the object needs to be access checked. 2774 // Deoptimize if the object needs to be access checked.
2790 __ Ldrb(object_instance_type, 2775 __ Ldrb(object_instance_type,
2791 FieldMemOperand(object_map, Map::kBitFieldOffset)); 2776 FieldMemOperand(object_map, Map::kBitFieldOffset));
2792 __ Tst(object_instance_type, Operand(1 << Map::kIsAccessCheckNeeded)); 2777 __ Tst(object_instance_type, Operand(1 << Map::kIsAccessCheckNeeded));
2793 DeoptimizeIf(ne, instr, Deoptimizer::kAccessCheck); 2778 DeoptimizeIf(ne, instr, DeoptimizeReason::kAccessCheck);
2794 // Deoptimize for proxies. 2779 // Deoptimize for proxies.
2795 __ CompareInstanceType(object_map, object_instance_type, JS_PROXY_TYPE); 2780 __ CompareInstanceType(object_map, object_instance_type, JS_PROXY_TYPE);
2796 DeoptimizeIf(eq, instr, Deoptimizer::kProxy); 2781 DeoptimizeIf(eq, instr, DeoptimizeReason::kProxy);
2797 2782
2798 __ Ldr(object_prototype, FieldMemOperand(object_map, Map::kPrototypeOffset)); 2783 __ Ldr(object_prototype, FieldMemOperand(object_map, Map::kPrototypeOffset));
2799 __ CompareRoot(object_prototype, Heap::kNullValueRootIndex); 2784 __ CompareRoot(object_prototype, Heap::kNullValueRootIndex);
2800 __ B(eq, instr->FalseLabel(chunk_)); 2785 __ B(eq, instr->FalseLabel(chunk_));
2801 __ Cmp(object_prototype, prototype); 2786 __ Cmp(object_prototype, prototype);
2802 __ B(eq, instr->TrueLabel(chunk_)); 2787 __ B(eq, instr->TrueLabel(chunk_));
2803 __ Ldr(object_map, FieldMemOperand(object_prototype, HeapObject::kMapOffset)); 2788 __ Ldr(object_map, FieldMemOperand(object_prototype, HeapObject::kMapOffset));
2804 __ B(&loop); 2789 __ B(&loop);
2805 } 2790 }
2806 2791
(...skipping 167 matching lines...) Expand 10 before | Expand all | Expand 10 after
2974 } 2959 }
2975 2960
2976 2961
2977 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) { 2962 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
2978 Register context = ToRegister(instr->context()); 2963 Register context = ToRegister(instr->context());
2979 Register result = ToRegister(instr->result()); 2964 Register result = ToRegister(instr->result());
2980 __ Ldr(result, ContextMemOperand(context, instr->slot_index())); 2965 __ Ldr(result, ContextMemOperand(context, instr->slot_index()));
2981 if (instr->hydrogen()->RequiresHoleCheck()) { 2966 if (instr->hydrogen()->RequiresHoleCheck()) {
2982 if (instr->hydrogen()->DeoptimizesOnHole()) { 2967 if (instr->hydrogen()->DeoptimizesOnHole()) {
2983 DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr, 2968 DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr,
2984 Deoptimizer::kHole); 2969 DeoptimizeReason::kHole);
2985 } else { 2970 } else {
2986 Label not_the_hole; 2971 Label not_the_hole;
2987 __ JumpIfNotRoot(result, Heap::kTheHoleValueRootIndex, &not_the_hole); 2972 __ JumpIfNotRoot(result, Heap::kTheHoleValueRootIndex, &not_the_hole);
2988 __ LoadRoot(result, Heap::kUndefinedValueRootIndex); 2973 __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
2989 __ Bind(&not_the_hole); 2974 __ Bind(&not_the_hole);
2990 } 2975 }
2991 } 2976 }
2992 } 2977 }
2993 2978
2994 2979
2995 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) { 2980 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
2996 Register function = ToRegister(instr->function()); 2981 Register function = ToRegister(instr->function());
2997 Register result = ToRegister(instr->result()); 2982 Register result = ToRegister(instr->result());
2998 Register temp = ToRegister(instr->temp()); 2983 Register temp = ToRegister(instr->temp());
2999 2984
3000 // Get the prototype or initial map from the function. 2985 // Get the prototype or initial map from the function.
3001 __ Ldr(result, FieldMemOperand(function, 2986 __ Ldr(result, FieldMemOperand(function,
3002 JSFunction::kPrototypeOrInitialMapOffset)); 2987 JSFunction::kPrototypeOrInitialMapOffset));
3003 2988
3004 // Check that the function has a prototype or an initial map. 2989 // Check that the function has a prototype or an initial map.
3005 DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr, 2990 DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr,
3006 Deoptimizer::kHole); 2991 DeoptimizeReason::kHole);
3007 2992
3008 // If the function does not have an initial map, we're done. 2993 // If the function does not have an initial map, we're done.
3009 Label done; 2994 Label done;
3010 __ CompareObjectType(result, temp, temp, MAP_TYPE); 2995 __ CompareObjectType(result, temp, temp, MAP_TYPE);
3011 __ B(ne, &done); 2996 __ B(ne, &done);
3012 2997
3013 // Get the prototype from the initial map. 2998 // Get the prototype from the initial map.
3014 __ Ldr(result, FieldMemOperand(result, Map::kPrototypeOffset)); 2999 __ Ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
3015 3000
3016 // All done. 3001 // All done.
(...skipping 126 matching lines...) Expand 10 before | Expand all | Expand 10 after
3143 __ Ldrh(result, mem_op); 3128 __ Ldrh(result, mem_op);
3144 break; 3129 break;
3145 case INT32_ELEMENTS: 3130 case INT32_ELEMENTS:
3146 __ Ldrsw(result, mem_op); 3131 __ Ldrsw(result, mem_op);
3147 break; 3132 break;
3148 case UINT32_ELEMENTS: 3133 case UINT32_ELEMENTS:
3149 __ Ldr(result.W(), mem_op); 3134 __ Ldr(result.W(), mem_op);
3150 if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) { 3135 if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
3151 // Deopt if value > 0x80000000. 3136 // Deopt if value > 0x80000000.
3152 __ Tst(result, 0xFFFFFFFF80000000); 3137 __ Tst(result, 0xFFFFFFFF80000000);
3153 DeoptimizeIf(ne, instr, Deoptimizer::kNegativeValue); 3138 DeoptimizeIf(ne, instr, DeoptimizeReason::kNegativeValue);
3154 } 3139 }
3155 break; 3140 break;
3156 case FLOAT32_ELEMENTS: 3141 case FLOAT32_ELEMENTS:
3157 case FLOAT64_ELEMENTS: 3142 case FLOAT64_ELEMENTS:
3158 case FAST_HOLEY_DOUBLE_ELEMENTS: 3143 case FAST_HOLEY_DOUBLE_ELEMENTS:
3159 case FAST_HOLEY_ELEMENTS: 3144 case FAST_HOLEY_ELEMENTS:
3160 case FAST_HOLEY_SMI_ELEMENTS: 3145 case FAST_HOLEY_SMI_ELEMENTS:
3161 case FAST_DOUBLE_ELEMENTS: 3146 case FAST_DOUBLE_ELEMENTS:
3162 case FAST_ELEMENTS: 3147 case FAST_ELEMENTS:
3163 case FAST_SMI_ELEMENTS: 3148 case FAST_SMI_ELEMENTS:
(...skipping 75 matching lines...) Expand 10 before | Expand all | Expand 10 after
3239 instr->hydrogen()->representation(), 3224 instr->hydrogen()->representation(),
3240 instr->base_offset()); 3225 instr->base_offset());
3241 } 3226 }
3242 3227
3243 __ Ldr(result, mem_op); 3228 __ Ldr(result, mem_op);
3244 3229
3245 if (instr->hydrogen()->RequiresHoleCheck()) { 3230 if (instr->hydrogen()->RequiresHoleCheck()) {
3246 Register scratch = ToRegister(instr->temp()); 3231 Register scratch = ToRegister(instr->temp());
3247 __ Fmov(scratch, result); 3232 __ Fmov(scratch, result);
3248 __ Eor(scratch, scratch, kHoleNanInt64); 3233 __ Eor(scratch, scratch, kHoleNanInt64);
3249 DeoptimizeIfZero(scratch, instr, Deoptimizer::kHole); 3234 DeoptimizeIfZero(scratch, instr, DeoptimizeReason::kHole);
3250 } 3235 }
3251 } 3236 }
3252 3237
3253 3238
3254 void LCodeGen::DoLoadKeyedFixed(LLoadKeyedFixed* instr) { 3239 void LCodeGen::DoLoadKeyedFixed(LLoadKeyedFixed* instr) {
3255 Register elements = ToRegister(instr->elements()); 3240 Register elements = ToRegister(instr->elements());
3256 Register result = ToRegister(instr->result()); 3241 Register result = ToRegister(instr->result());
3257 MemOperand mem_op; 3242 MemOperand mem_op;
3258 3243
3259 Representation representation = instr->hydrogen()->representation(); 3244 Representation representation = instr->hydrogen()->representation();
(...skipping 17 matching lines...) Expand all
3277 3262
3278 mem_op = PrepareKeyedArrayOperand(load_base, elements, key, key_is_tagged, 3263 mem_op = PrepareKeyedArrayOperand(load_base, elements, key, key_is_tagged,
3279 instr->hydrogen()->elements_kind(), 3264 instr->hydrogen()->elements_kind(),
3280 representation, instr->base_offset()); 3265 representation, instr->base_offset());
3281 } 3266 }
3282 3267
3283 __ Load(result, mem_op, representation); 3268 __ Load(result, mem_op, representation);
3284 3269
3285 if (instr->hydrogen()->RequiresHoleCheck()) { 3270 if (instr->hydrogen()->RequiresHoleCheck()) {
3286 if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) { 3271 if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
3287 DeoptimizeIfNotSmi(result, instr, Deoptimizer::kNotASmi); 3272 DeoptimizeIfNotSmi(result, instr, DeoptimizeReason::kNotASmi);
3288 } else { 3273 } else {
3289 DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr, 3274 DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr,
3290 Deoptimizer::kHole); 3275 DeoptimizeReason::kHole);
3291 } 3276 }
3292 } else if (instr->hydrogen()->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) { 3277 } else if (instr->hydrogen()->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) {
3293 DCHECK(instr->hydrogen()->elements_kind() == FAST_HOLEY_ELEMENTS); 3278 DCHECK(instr->hydrogen()->elements_kind() == FAST_HOLEY_ELEMENTS);
3294 Label done; 3279 Label done;
3295 __ CompareRoot(result, Heap::kTheHoleValueRootIndex); 3280 __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
3296 __ B(ne, &done); 3281 __ B(ne, &done);
3297 if (info()->IsStub()) { 3282 if (info()->IsStub()) {
3298 // A stub can safely convert the hole to undefined only if the array 3283 // A stub can safely convert the hole to undefined only if the array
3299 // protector cell contains (Smi) Isolate::kArrayProtectorValid. Otherwise 3284 // protector cell contains (Smi) Isolate::kArrayProtectorValid. Otherwise
3300 // it needs to bail out. 3285 // it needs to bail out.
3301 __ LoadRoot(result, Heap::kArrayProtectorRootIndex); 3286 __ LoadRoot(result, Heap::kArrayProtectorRootIndex);
3302 __ Ldr(result, FieldMemOperand(result, Cell::kValueOffset)); 3287 __ Ldr(result, FieldMemOperand(result, Cell::kValueOffset));
3303 __ Cmp(result, Operand(Smi::FromInt(Isolate::kArrayProtectorValid))); 3288 __ Cmp(result, Operand(Smi::FromInt(Isolate::kArrayProtectorValid)));
3304 DeoptimizeIf(ne, instr, Deoptimizer::kHole); 3289 DeoptimizeIf(ne, instr, DeoptimizeReason::kHole);
3305 } 3290 }
3306 __ LoadRoot(result, Heap::kUndefinedValueRootIndex); 3291 __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
3307 __ Bind(&done); 3292 __ Bind(&done);
3308 } 3293 }
3309 } 3294 }
3310 3295
3311 3296
3312 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) { 3297 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
3313 DCHECK(ToRegister(instr->context()).is(cp)); 3298 DCHECK(ToRegister(instr->context()).is(cp));
3314 DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister())); 3299 DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
(...skipping 73 matching lines...) Expand 10 before | Expand all | Expand 10 after
3388 if (r.IsDouble()) { 3373 if (r.IsDouble()) {
3389 DoubleRegister input = ToDoubleRegister(instr->value()); 3374 DoubleRegister input = ToDoubleRegister(instr->value());
3390 DoubleRegister result = ToDoubleRegister(instr->result()); 3375 DoubleRegister result = ToDoubleRegister(instr->result());
3391 __ Fabs(result, input); 3376 __ Fabs(result, input);
3392 } else if (r.IsSmi() || r.IsInteger32()) { 3377 } else if (r.IsSmi() || r.IsInteger32()) {
3393 Register input = r.IsSmi() ? ToRegister(instr->value()) 3378 Register input = r.IsSmi() ? ToRegister(instr->value())
3394 : ToRegister32(instr->value()); 3379 : ToRegister32(instr->value());
3395 Register result = r.IsSmi() ? ToRegister(instr->result()) 3380 Register result = r.IsSmi() ? ToRegister(instr->result())
3396 : ToRegister32(instr->result()); 3381 : ToRegister32(instr->result());
3397 __ Abs(result, input); 3382 __ Abs(result, input);
3398 DeoptimizeIf(vs, instr, Deoptimizer::kOverflow); 3383 DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow);
3399 } 3384 }
3400 } 3385 }
3401 3386
3402 3387
3403 void LCodeGen::DoDeferredMathAbsTagged(LMathAbsTagged* instr, 3388 void LCodeGen::DoDeferredMathAbsTagged(LMathAbsTagged* instr,
3404 Label* exit, 3389 Label* exit,
3405 Label* allocation_entry) { 3390 Label* allocation_entry) {
3406 // Handle the tricky cases of MathAbsTagged: 3391 // Handle the tricky cases of MathAbsTagged:
3407 // - HeapNumber inputs. 3392 // - HeapNumber inputs.
3408 // - Negative inputs produce a positive result, so a new HeapNumber is 3393 // - Negative inputs produce a positive result, so a new HeapNumber is
(...skipping 137 matching lines...) Expand 10 before | Expand all | Expand 10 after
3546 3531
3547 __ Frintm(result, input); 3532 __ Frintm(result, input);
3548 } 3533 }
3549 3534
3550 3535
3551 void LCodeGen::DoMathFloorI(LMathFloorI* instr) { 3536 void LCodeGen::DoMathFloorI(LMathFloorI* instr) {
3552 DoubleRegister input = ToDoubleRegister(instr->value()); 3537 DoubleRegister input = ToDoubleRegister(instr->value());
3553 Register result = ToRegister(instr->result()); 3538 Register result = ToRegister(instr->result());
3554 3539
3555 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 3540 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3556 DeoptimizeIfMinusZero(input, instr, Deoptimizer::kMinusZero); 3541 DeoptimizeIfMinusZero(input, instr, DeoptimizeReason::kMinusZero);
3557 } 3542 }
3558 3543
3559 __ Fcvtms(result, input); 3544 __ Fcvtms(result, input);
3560 3545
3561 // Check that the result fits into a 32-bit integer. 3546 // Check that the result fits into a 32-bit integer.
3562 // - The result did not overflow. 3547 // - The result did not overflow.
3563 __ Cmp(result, Operand(result, SXTW)); 3548 __ Cmp(result, Operand(result, SXTW));
3564 // - The input was not NaN. 3549 // - The input was not NaN.
3565 __ Fccmp(input, input, NoFlag, eq); 3550 __ Fccmp(input, input, NoFlag, eq);
3566 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN); 3551 DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN);
3567 } 3552 }
3568 3553
3569 3554
3570 void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) { 3555 void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
3571 Register dividend = ToRegister32(instr->dividend()); 3556 Register dividend = ToRegister32(instr->dividend());
3572 Register result = ToRegister32(instr->result()); 3557 Register result = ToRegister32(instr->result());
3573 int32_t divisor = instr->divisor(); 3558 int32_t divisor = instr->divisor();
3574 3559
3575 // If the divisor is 1, return the dividend. 3560 // If the divisor is 1, return the dividend.
3576 if (divisor == 1) { 3561 if (divisor == 1) {
3577 __ Mov(result, dividend, kDiscardForSameWReg); 3562 __ Mov(result, dividend, kDiscardForSameWReg);
3578 return; 3563 return;
3579 } 3564 }
3580 3565
3581 // If the divisor is positive, things are easy: There can be no deopts and we 3566 // If the divisor is positive, things are easy: There can be no deopts and we
3582 // can simply do an arithmetic right shift. 3567 // can simply do an arithmetic right shift.
3583 int32_t shift = WhichPowerOf2Abs(divisor); 3568 int32_t shift = WhichPowerOf2Abs(divisor);
3584 if (divisor > 1) { 3569 if (divisor > 1) {
3585 __ Mov(result, Operand(dividend, ASR, shift)); 3570 __ Mov(result, Operand(dividend, ASR, shift));
3586 return; 3571 return;
3587 } 3572 }
3588 3573
3589 // If the divisor is negative, we have to negate and handle edge cases. 3574 // If the divisor is negative, we have to negate and handle edge cases.
3590 __ Negs(result, dividend); 3575 __ Negs(result, dividend);
3591 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 3576 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3592 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero); 3577 DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
3593 } 3578 }
3594 3579
3595 // Dividing by -1 is basically negation, unless we overflow. 3580 // Dividing by -1 is basically negation, unless we overflow.
3596 if (divisor == -1) { 3581 if (divisor == -1) {
3597 if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { 3582 if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
3598 DeoptimizeIf(vs, instr, Deoptimizer::kOverflow); 3583 DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow);
3599 } 3584 }
3600 return; 3585 return;
3601 } 3586 }
3602 3587
3603 // If the negation could not overflow, simply shifting is OK. 3588 // If the negation could not overflow, simply shifting is OK.
3604 if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { 3589 if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
3605 __ Mov(result, Operand(dividend, ASR, shift)); 3590 __ Mov(result, Operand(dividend, ASR, shift));
3606 return; 3591 return;
3607 } 3592 }
3608 3593
3609 __ Asr(result, result, shift); 3594 __ Asr(result, result, shift);
3610 __ Csel(result, result, kMinInt / divisor, vc); 3595 __ Csel(result, result, kMinInt / divisor, vc);
3611 } 3596 }
3612 3597
3613 3598
3614 void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) { 3599 void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
3615 Register dividend = ToRegister32(instr->dividend()); 3600 Register dividend = ToRegister32(instr->dividend());
3616 int32_t divisor = instr->divisor(); 3601 int32_t divisor = instr->divisor();
3617 Register result = ToRegister32(instr->result()); 3602 Register result = ToRegister32(instr->result());
3618 DCHECK(!AreAliased(dividend, result)); 3603 DCHECK(!AreAliased(dividend, result));
3619 3604
3620 if (divisor == 0) { 3605 if (divisor == 0) {
3621 Deoptimize(instr, Deoptimizer::kDivisionByZero); 3606 Deoptimize(instr, DeoptimizeReason::kDivisionByZero);
3622 return; 3607 return;
3623 } 3608 }
3624 3609
3625 // Check for (0 / -x) that will produce negative zero. 3610 // Check for (0 / -x) that will produce negative zero.
3626 HMathFloorOfDiv* hdiv = instr->hydrogen(); 3611 HMathFloorOfDiv* hdiv = instr->hydrogen();
3627 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { 3612 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
3628 DeoptimizeIfZero(dividend, instr, Deoptimizer::kMinusZero); 3613 DeoptimizeIfZero(dividend, instr, DeoptimizeReason::kMinusZero);
3629 } 3614 }
3630 3615
3631 // Easy case: We need no dynamic check for the dividend and the flooring 3616 // Easy case: We need no dynamic check for the dividend and the flooring
3632 // division is the same as the truncating division. 3617 // division is the same as the truncating division.
3633 if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) || 3618 if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
3634 (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) { 3619 (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
3635 __ TruncatingDiv(result, dividend, Abs(divisor)); 3620 __ TruncatingDiv(result, dividend, Abs(divisor));
3636 if (divisor < 0) __ Neg(result, result); 3621 if (divisor < 0) __ Neg(result, result);
3637 return; 3622 return;
3638 } 3623 }
(...skipping 22 matching lines...) Expand all
3661 Register dividend = ToRegister32(instr->dividend()); 3646 Register dividend = ToRegister32(instr->dividend());
3662 Register divisor = ToRegister32(instr->divisor()); 3647 Register divisor = ToRegister32(instr->divisor());
3663 Register remainder = ToRegister32(instr->temp()); 3648 Register remainder = ToRegister32(instr->temp());
3664 Register result = ToRegister32(instr->result()); 3649 Register result = ToRegister32(instr->result());
3665 3650
3666 // This can't cause an exception on ARM, so we can speculatively 3651 // This can't cause an exception on ARM, so we can speculatively
3667 // execute it already now. 3652 // execute it already now.
3668 __ Sdiv(result, dividend, divisor); 3653 __ Sdiv(result, dividend, divisor);
3669 3654
3670 // Check for x / 0. 3655 // Check for x / 0.
3671 DeoptimizeIfZero(divisor, instr, Deoptimizer::kDivisionByZero); 3656 DeoptimizeIfZero(divisor, instr, DeoptimizeReason::kDivisionByZero);
3672 3657
3673 // Check for (kMinInt / -1). 3658 // Check for (kMinInt / -1).
3674 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) { 3659 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
3675 // The V flag will be set iff dividend == kMinInt. 3660 // The V flag will be set iff dividend == kMinInt.
3676 __ Cmp(dividend, 1); 3661 __ Cmp(dividend, 1);
3677 __ Ccmp(divisor, -1, NoFlag, vs); 3662 __ Ccmp(divisor, -1, NoFlag, vs);
3678 DeoptimizeIf(eq, instr, Deoptimizer::kOverflow); 3663 DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow);
3679 } 3664 }
3680 3665
3681 // Check for (0 / -x) that will produce negative zero. 3666 // Check for (0 / -x) that will produce negative zero.
3682 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 3667 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3683 __ Cmp(divisor, 0); 3668 __ Cmp(divisor, 0);
3684 __ Ccmp(dividend, 0, ZFlag, mi); 3669 __ Ccmp(dividend, 0, ZFlag, mi);
3685 // "divisor" can't be null because the code would have already been 3670 // "divisor" can't be null because the code would have already been
3686 // deoptimized. The Z flag is set only if (divisor < 0) and (dividend == 0). 3671 // deoptimized. The Z flag is set only if (divisor < 0) and (dividend == 0).
3687 // In this case we need to deoptimize to produce a -0. 3672 // In this case we need to deoptimize to produce a -0.
3688 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero); 3673 DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
3689 } 3674 }
3690 3675
3691 Label done; 3676 Label done;
3692 // If both operands have the same sign then we are done. 3677 // If both operands have the same sign then we are done.
3693 __ Eor(remainder, dividend, divisor); 3678 __ Eor(remainder, dividend, divisor);
3694 __ Tbz(remainder, kWSignBit, &done); 3679 __ Tbz(remainder, kWSignBit, &done);
3695 3680
3696 // Check if the result needs to be corrected. 3681 // Check if the result needs to be corrected.
3697 __ Msub(remainder, result, divisor, dividend); 3682 __ Msub(remainder, result, divisor, dividend);
3698 __ Cbz(remainder, &done); 3683 __ Cbz(remainder, &done);
(...skipping 137 matching lines...) Expand 10 before | Expand all | Expand 10 after
3836 // result fits in 32 bits. 3821 // result fits in 32 bits.
3837 __ Cmp(result, Operand(result.W(), SXTW)); 3822 __ Cmp(result, Operand(result.W(), SXTW));
3838 __ Ccmp(result, 1, ZFlag, eq); 3823 __ Ccmp(result, 1, ZFlag, eq);
3839 __ B(hi, &done); 3824 __ B(hi, &done);
3840 3825
3841 // At this point, we have to handle possible inputs of NaN or numbers in the 3826 // At this point, we have to handle possible inputs of NaN or numbers in the
3842 // range [-0.5, 1.5[, or numbers larger than 32 bits. 3827 // range [-0.5, 1.5[, or numbers larger than 32 bits.
3843 3828
3844 // Deoptimize if the result > 1, as it must be larger than 32 bits. 3829 // Deoptimize if the result > 1, as it must be larger than 32 bits.
3845 __ Cmp(result, 1); 3830 __ Cmp(result, 1);
3846 DeoptimizeIf(hi, instr, Deoptimizer::kOverflow); 3831 DeoptimizeIf(hi, instr, DeoptimizeReason::kOverflow);
3847 3832
3848 // Deoptimize for negative inputs, which at this point are only numbers in 3833 // Deoptimize for negative inputs, which at this point are only numbers in
3849 // the range [-0.5, -0.0] 3834 // the range [-0.5, -0.0]
3850 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 3835 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3851 __ Fmov(result, input); 3836 __ Fmov(result, input);
3852 DeoptimizeIfNegative(result, instr, Deoptimizer::kMinusZero); 3837 DeoptimizeIfNegative(result, instr, DeoptimizeReason::kMinusZero);
3853 } 3838 }
3854 3839
3855 // Deoptimize if the input was NaN. 3840 // Deoptimize if the input was NaN.
3856 __ Fcmp(input, dot_five); 3841 __ Fcmp(input, dot_five);
3857 DeoptimizeIf(vs, instr, Deoptimizer::kNaN); 3842 DeoptimizeIf(vs, instr, DeoptimizeReason::kNaN);
3858 3843
3859 // Now, the only unhandled inputs are in the range [0.0, 1.5[ (or [-0.5, 1.5[ 3844 // Now, the only unhandled inputs are in the range [0.0, 1.5[ (or [-0.5, 1.5[
3860 // if we didn't generate a -0.0 bailout). If input >= 0.5 then return 1, 3845 // if we didn't generate a -0.0 bailout). If input >= 0.5 then return 1,
3861 // else 0; we avoid dealing with 0.499...94 directly. 3846 // else 0; we avoid dealing with 0.499...94 directly.
3862 __ Cset(result, ge); 3847 __ Cset(result, ge);
3863 __ Bind(&done); 3848 __ Bind(&done);
3864 } 3849 }
3865 3850
3866 3851
3867 void LCodeGen::DoMathFround(LMathFround* instr) { 3852 void LCodeGen::DoMathFround(LMathFround* instr) {
(...skipping 57 matching lines...) Expand 10 before | Expand all | Expand 10 after
3925 HMod* hmod = instr->hydrogen(); 3910 HMod* hmod = instr->hydrogen();
3926 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1); 3911 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
3927 Label dividend_is_not_negative, done; 3912 Label dividend_is_not_negative, done;
3928 if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) { 3913 if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
3929 __ Tbz(dividend, kWSignBit, &dividend_is_not_negative); 3914 __ Tbz(dividend, kWSignBit, &dividend_is_not_negative);
3930 // Note that this is correct even for kMinInt operands. 3915 // Note that this is correct even for kMinInt operands.
3931 __ Neg(dividend, dividend); 3916 __ Neg(dividend, dividend);
3932 __ And(dividend, dividend, mask); 3917 __ And(dividend, dividend, mask);
3933 __ Negs(dividend, dividend); 3918 __ Negs(dividend, dividend);
3934 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { 3919 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
3935 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero); 3920 DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
3936 } 3921 }
3937 __ B(&done); 3922 __ B(&done);
3938 } 3923 }
3939 3924
3940 __ bind(&dividend_is_not_negative); 3925 __ bind(&dividend_is_not_negative);
3941 __ And(dividend, dividend, mask); 3926 __ And(dividend, dividend, mask);
3942 __ bind(&done); 3927 __ bind(&done);
3943 } 3928 }
3944 3929
3945 3930
3946 void LCodeGen::DoModByConstI(LModByConstI* instr) { 3931 void LCodeGen::DoModByConstI(LModByConstI* instr) {
3947 Register dividend = ToRegister32(instr->dividend()); 3932 Register dividend = ToRegister32(instr->dividend());
3948 int32_t divisor = instr->divisor(); 3933 int32_t divisor = instr->divisor();
3949 Register result = ToRegister32(instr->result()); 3934 Register result = ToRegister32(instr->result());
3950 Register temp = ToRegister32(instr->temp()); 3935 Register temp = ToRegister32(instr->temp());
3951 DCHECK(!AreAliased(dividend, result, temp)); 3936 DCHECK(!AreAliased(dividend, result, temp));
3952 3937
3953 if (divisor == 0) { 3938 if (divisor == 0) {
3954 Deoptimize(instr, Deoptimizer::kDivisionByZero); 3939 Deoptimize(instr, DeoptimizeReason::kDivisionByZero);
3955 return; 3940 return;
3956 } 3941 }
3957 3942
3958 __ TruncatingDiv(result, dividend, Abs(divisor)); 3943 __ TruncatingDiv(result, dividend, Abs(divisor));
3959 __ Sxtw(dividend.X(), dividend); 3944 __ Sxtw(dividend.X(), dividend);
3960 __ Mov(temp, Abs(divisor)); 3945 __ Mov(temp, Abs(divisor));
3961 __ Smsubl(result.X(), result, temp, dividend.X()); 3946 __ Smsubl(result.X(), result, temp, dividend.X());
3962 3947
3963 // Check for negative zero. 3948 // Check for negative zero.
3964 HMod* hmod = instr->hydrogen(); 3949 HMod* hmod = instr->hydrogen();
3965 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { 3950 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
3966 Label remainder_not_zero; 3951 Label remainder_not_zero;
3967 __ Cbnz(result, &remainder_not_zero); 3952 __ Cbnz(result, &remainder_not_zero);
3968 DeoptimizeIfNegative(dividend, instr, Deoptimizer::kMinusZero); 3953 DeoptimizeIfNegative(dividend, instr, DeoptimizeReason::kMinusZero);
3969 __ bind(&remainder_not_zero); 3954 __ bind(&remainder_not_zero);
3970 } 3955 }
3971 } 3956 }
3972 3957
3973 3958
3974 void LCodeGen::DoModI(LModI* instr) { 3959 void LCodeGen::DoModI(LModI* instr) {
3975 Register dividend = ToRegister32(instr->left()); 3960 Register dividend = ToRegister32(instr->left());
3976 Register divisor = ToRegister32(instr->right()); 3961 Register divisor = ToRegister32(instr->right());
3977 Register result = ToRegister32(instr->result()); 3962 Register result = ToRegister32(instr->result());
3978 3963
3979 Label done; 3964 Label done;
3980 // modulo = dividend - quotient * divisor 3965 // modulo = dividend - quotient * divisor
3981 __ Sdiv(result, dividend, divisor); 3966 __ Sdiv(result, dividend, divisor);
3982 if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) { 3967 if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
3983 DeoptimizeIfZero(divisor, instr, Deoptimizer::kDivisionByZero); 3968 DeoptimizeIfZero(divisor, instr, DeoptimizeReason::kDivisionByZero);
3984 } 3969 }
3985 __ Msub(result, result, divisor, dividend); 3970 __ Msub(result, result, divisor, dividend);
3986 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 3971 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3987 __ Cbnz(result, &done); 3972 __ Cbnz(result, &done);
3988 DeoptimizeIfNegative(dividend, instr, Deoptimizer::kMinusZero); 3973 DeoptimizeIfNegative(dividend, instr, DeoptimizeReason::kMinusZero);
3989 } 3974 }
3990 __ Bind(&done); 3975 __ Bind(&done);
3991 } 3976 }
3992 3977
3993 3978
3994 void LCodeGen::DoMulConstIS(LMulConstIS* instr) { 3979 void LCodeGen::DoMulConstIS(LMulConstIS* instr) {
3995 DCHECK(instr->hydrogen()->representation().IsSmiOrInteger32()); 3980 DCHECK(instr->hydrogen()->representation().IsSmiOrInteger32());
3996 bool is_smi = instr->hydrogen()->representation().IsSmi(); 3981 bool is_smi = instr->hydrogen()->representation().IsSmi();
3997 Register result = 3982 Register result =
3998 is_smi ? ToRegister(instr->result()) : ToRegister32(instr->result()); 3983 is_smi ? ToRegister(instr->result()) : ToRegister32(instr->result());
3999 Register left = 3984 Register left =
4000 is_smi ? ToRegister(instr->left()) : ToRegister32(instr->left()); 3985 is_smi ? ToRegister(instr->left()) : ToRegister32(instr->left());
4001 int32_t right = ToInteger32(instr->right()); 3986 int32_t right = ToInteger32(instr->right());
4002 DCHECK((right > -kMaxInt) && (right < kMaxInt)); 3987 DCHECK((right > -kMaxInt) && (right < kMaxInt));
4003 3988
4004 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); 3989 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
4005 bool bailout_on_minus_zero = 3990 bool bailout_on_minus_zero =
4006 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero); 3991 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
4007 3992
4008 if (bailout_on_minus_zero) { 3993 if (bailout_on_minus_zero) {
4009 if (right < 0) { 3994 if (right < 0) {
4010 // The result is -0 if right is negative and left is zero. 3995 // The result is -0 if right is negative and left is zero.
4011 DeoptimizeIfZero(left, instr, Deoptimizer::kMinusZero); 3996 DeoptimizeIfZero(left, instr, DeoptimizeReason::kMinusZero);
4012 } else if (right == 0) { 3997 } else if (right == 0) {
4013 // The result is -0 if the right is zero and the left is negative. 3998 // The result is -0 if the right is zero and the left is negative.
4014 DeoptimizeIfNegative(left, instr, Deoptimizer::kMinusZero); 3999 DeoptimizeIfNegative(left, instr, DeoptimizeReason::kMinusZero);
4015 } 4000 }
4016 } 4001 }
4017 4002
4018 switch (right) { 4003 switch (right) {
4019 // Cases which can detect overflow. 4004 // Cases which can detect overflow.
4020 case -1: 4005 case -1:
4021 if (can_overflow) { 4006 if (can_overflow) {
4022 // Only 0x80000000 can overflow here. 4007 // Only 0x80000000 can overflow here.
4023 __ Negs(result, left); 4008 __ Negs(result, left);
4024 DeoptimizeIf(vs, instr, Deoptimizer::kOverflow); 4009 DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow);
4025 } else { 4010 } else {
4026 __ Neg(result, left); 4011 __ Neg(result, left);
4027 } 4012 }
4028 break; 4013 break;
4029 case 0: 4014 case 0:
4030 // This case can never overflow. 4015 // This case can never overflow.
4031 __ Mov(result, 0); 4016 __ Mov(result, 0);
4032 break; 4017 break;
4033 case 1: 4018 case 1:
4034 // This case can never overflow. 4019 // This case can never overflow.
4035 __ Mov(result, left, kDiscardForSameWReg); 4020 __ Mov(result, left, kDiscardForSameWReg);
4036 break; 4021 break;
4037 case 2: 4022 case 2:
4038 if (can_overflow) { 4023 if (can_overflow) {
4039 __ Adds(result, left, left); 4024 __ Adds(result, left, left);
4040 DeoptimizeIf(vs, instr, Deoptimizer::kOverflow); 4025 DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow);
4041 } else { 4026 } else {
4042 __ Add(result, left, left); 4027 __ Add(result, left, left);
4043 } 4028 }
4044 break; 4029 break;
4045 4030
4046 default: 4031 default:
4047 // Multiplication by constant powers of two (and some related values) 4032 // Multiplication by constant powers of two (and some related values)
4048 // can be done efficiently with shifted operands. 4033 // can be done efficiently with shifted operands.
4049 int32_t right_abs = Abs(right); 4034 int32_t right_abs = Abs(right);
4050 4035
4051 if (base::bits::IsPowerOfTwo32(right_abs)) { 4036 if (base::bits::IsPowerOfTwo32(right_abs)) {
4052 int right_log2 = WhichPowerOf2(right_abs); 4037 int right_log2 = WhichPowerOf2(right_abs);
4053 4038
4054 if (can_overflow) { 4039 if (can_overflow) {
4055 Register scratch = result; 4040 Register scratch = result;
4056 DCHECK(!AreAliased(scratch, left)); 4041 DCHECK(!AreAliased(scratch, left));
4057 __ Cls(scratch, left); 4042 __ Cls(scratch, left);
4058 __ Cmp(scratch, right_log2); 4043 __ Cmp(scratch, right_log2);
4059 DeoptimizeIf(lt, instr, Deoptimizer::kOverflow); 4044 DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow);
4060 } 4045 }
4061 4046
4062 if (right >= 0) { 4047 if (right >= 0) {
4063 // result = left << log2(right) 4048 // result = left << log2(right)
4064 __ Lsl(result, left, right_log2); 4049 __ Lsl(result, left, right_log2);
4065 } else { 4050 } else {
4066 // result = -left << log2(-right) 4051 // result = -left << log2(-right)
4067 if (can_overflow) { 4052 if (can_overflow) {
4068 __ Negs(result, Operand(left, LSL, right_log2)); 4053 __ Negs(result, Operand(left, LSL, right_log2));
4069 DeoptimizeIf(vs, instr, Deoptimizer::kOverflow); 4054 DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow);
4070 } else { 4055 } else {
4071 __ Neg(result, Operand(left, LSL, right_log2)); 4056 __ Neg(result, Operand(left, LSL, right_log2));
4072 } 4057 }
4073 } 4058 }
4074 return; 4059 return;
4075 } 4060 }
4076 4061
4077 4062
4078 // For the following cases, we could perform a conservative overflow check 4063 // For the following cases, we could perform a conservative overflow check
4079 // with CLS as above. However the few cycles saved are likely not worth 4064 // with CLS as above. However the few cycles saved are likely not worth
(...skipping 37 matching lines...) Expand 10 before | Expand all | Expand 10 after
4117 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero); 4102 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
4118 4103
4119 if (bailout_on_minus_zero && !left.Is(right)) { 4104 if (bailout_on_minus_zero && !left.Is(right)) {
4120 // If one operand is zero and the other is negative, the result is -0. 4105 // If one operand is zero and the other is negative, the result is -0.
4121 // - Set Z (eq) if either left or right, or both, are 0. 4106 // - Set Z (eq) if either left or right, or both, are 0.
4122 __ Cmp(left, 0); 4107 __ Cmp(left, 0);
4123 __ Ccmp(right, 0, ZFlag, ne); 4108 __ Ccmp(right, 0, ZFlag, ne);
4124 // - If so (eq), set N (mi) if left + right is negative. 4109 // - If so (eq), set N (mi) if left + right is negative.
4125 // - Otherwise, clear N. 4110 // - Otherwise, clear N.
4126 __ Ccmn(left, right, NoFlag, eq); 4111 __ Ccmn(left, right, NoFlag, eq);
4127 DeoptimizeIf(mi, instr, Deoptimizer::kMinusZero); 4112 DeoptimizeIf(mi, instr, DeoptimizeReason::kMinusZero);
4128 } 4113 }
4129 4114
4130 if (can_overflow) { 4115 if (can_overflow) {
4131 __ Smull(result.X(), left, right); 4116 __ Smull(result.X(), left, right);
4132 __ Cmp(result.X(), Operand(result, SXTW)); 4117 __ Cmp(result.X(), Operand(result, SXTW));
4133 DeoptimizeIf(ne, instr, Deoptimizer::kOverflow); 4118 DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow);
4134 } else { 4119 } else {
4135 __ Mul(result, left, right); 4120 __ Mul(result, left, right);
4136 } 4121 }
4137 } 4122 }
4138 4123
4139 4124
4140 void LCodeGen::DoMulS(LMulS* instr) { 4125 void LCodeGen::DoMulS(LMulS* instr) {
4141 Register result = ToRegister(instr->result()); 4126 Register result = ToRegister(instr->result());
4142 Register left = ToRegister(instr->left()); 4127 Register left = ToRegister(instr->left());
4143 Register right = ToRegister(instr->right()); 4128 Register right = ToRegister(instr->right());
4144 4129
4145 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); 4130 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
4146 bool bailout_on_minus_zero = 4131 bool bailout_on_minus_zero =
4147 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero); 4132 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
4148 4133
4149 if (bailout_on_minus_zero && !left.Is(right)) { 4134 if (bailout_on_minus_zero && !left.Is(right)) {
4150 // If one operand is zero and the other is negative, the result is -0. 4135 // If one operand is zero and the other is negative, the result is -0.
4151 // - Set Z (eq) if either left or right, or both, are 0. 4136 // - Set Z (eq) if either left or right, or both, are 0.
4152 __ Cmp(left, 0); 4137 __ Cmp(left, 0);
4153 __ Ccmp(right, 0, ZFlag, ne); 4138 __ Ccmp(right, 0, ZFlag, ne);
4154 // - If so (eq), set N (mi) if left + right is negative. 4139 // - If so (eq), set N (mi) if left + right is negative.
4155 // - Otherwise, clear N. 4140 // - Otherwise, clear N.
4156 __ Ccmn(left, right, NoFlag, eq); 4141 __ Ccmn(left, right, NoFlag, eq);
4157 DeoptimizeIf(mi, instr, Deoptimizer::kMinusZero); 4142 DeoptimizeIf(mi, instr, DeoptimizeReason::kMinusZero);
4158 } 4143 }
4159 4144
4160 STATIC_ASSERT((kSmiShift == 32) && (kSmiTag == 0)); 4145 STATIC_ASSERT((kSmiShift == 32) && (kSmiTag == 0));
4161 if (can_overflow) { 4146 if (can_overflow) {
4162 __ Smulh(result, left, right); 4147 __ Smulh(result, left, right);
4163 __ Cmp(result, Operand(result.W(), SXTW)); 4148 __ Cmp(result, Operand(result.W(), SXTW));
4164 __ SmiTag(result); 4149 __ SmiTag(result);
4165 DeoptimizeIf(ne, instr, Deoptimizer::kOverflow); 4150 DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow);
4166 } else { 4151 } else {
4167 if (AreAliased(result, left, right)) { 4152 if (AreAliased(result, left, right)) {
4168 // All three registers are the same: half untag the input and then 4153 // All three registers are the same: half untag the input and then
4169 // multiply, giving a tagged result. 4154 // multiply, giving a tagged result.
4170 STATIC_ASSERT((kSmiShift % 2) == 0); 4155 STATIC_ASSERT((kSmiShift % 2) == 0);
4171 __ Asr(result, left, kSmiShift / 2); 4156 __ Asr(result, left, kSmiShift / 2);
4172 __ Mul(result, result, result); 4157 __ Mul(result, result, result);
4173 } else if (result.Is(left) && !left.Is(right)) { 4158 } else if (result.Is(left) && !left.Is(right)) {
4174 // Registers result and left alias, right is distinct: untag left into 4159 // Registers result and left alias, right is distinct: untag left into
4175 // result, and then multiply by right, giving a tagged result. 4160 // result, and then multiply by right, giving a tagged result.
(...skipping 150 matching lines...) Expand 10 before | Expand all | Expand 10 after
4326 // Heap number map check. 4311 // Heap number map check.
4327 if (can_convert_undefined_to_nan) { 4312 if (can_convert_undefined_to_nan) {
4328 __ JumpIfNotHeapNumber(input, &convert_undefined); 4313 __ JumpIfNotHeapNumber(input, &convert_undefined);
4329 } else { 4314 } else {
4330 DeoptimizeIfNotHeapNumber(input, instr); 4315 DeoptimizeIfNotHeapNumber(input, instr);
4331 } 4316 }
4332 4317
4333 // Load heap number. 4318 // Load heap number.
4334 __ Ldr(result, FieldMemOperand(input, HeapNumber::kValueOffset)); 4319 __ Ldr(result, FieldMemOperand(input, HeapNumber::kValueOffset));
4335 if (instr->hydrogen()->deoptimize_on_minus_zero()) { 4320 if (instr->hydrogen()->deoptimize_on_minus_zero()) {
4336 DeoptimizeIfMinusZero(result, instr, Deoptimizer::kMinusZero); 4321 DeoptimizeIfMinusZero(result, instr, DeoptimizeReason::kMinusZero);
4337 } 4322 }
4338 __ B(&done); 4323 __ B(&done);
4339 4324
4340 if (can_convert_undefined_to_nan) { 4325 if (can_convert_undefined_to_nan) {
4341 __ Bind(&convert_undefined); 4326 __ Bind(&convert_undefined);
4342 DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr, 4327 DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr,
4343 Deoptimizer::kNotAHeapNumberUndefined); 4328 DeoptimizeReason::kNotAHeapNumberUndefined);
4344 4329
4345 __ LoadRoot(scratch, Heap::kNanValueRootIndex); 4330 __ LoadRoot(scratch, Heap::kNanValueRootIndex);
4346 __ Ldr(result, FieldMemOperand(scratch, HeapNumber::kValueOffset)); 4331 __ Ldr(result, FieldMemOperand(scratch, HeapNumber::kValueOffset));
4347 __ B(&done); 4332 __ B(&done);
4348 } 4333 }
4349 4334
4350 } else { 4335 } else {
4351 DCHECK(mode == NUMBER_CANDIDATE_IS_SMI); 4336 DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
4352 // Fall through to load_smi. 4337 // Fall through to load_smi.
4353 } 4338 }
(...skipping 167 matching lines...) Expand 10 before | Expand all | Expand 10 after
4521 } 4506 }
4522 } 4507 }
4523 4508
4524 4509
4525 void LCodeGen::DoSmiTag(LSmiTag* instr) { 4510 void LCodeGen::DoSmiTag(LSmiTag* instr) {
4526 HChange* hchange = instr->hydrogen(); 4511 HChange* hchange = instr->hydrogen();
4527 Register input = ToRegister(instr->value()); 4512 Register input = ToRegister(instr->value());
4528 Register output = ToRegister(instr->result()); 4513 Register output = ToRegister(instr->result());
4529 if (hchange->CheckFlag(HValue::kCanOverflow) && 4514 if (hchange->CheckFlag(HValue::kCanOverflow) &&
4530 hchange->value()->CheckFlag(HValue::kUint32)) { 4515 hchange->value()->CheckFlag(HValue::kUint32)) {
4531 DeoptimizeIfNegative(input.W(), instr, Deoptimizer::kOverflow); 4516 DeoptimizeIfNegative(input.W(), instr, DeoptimizeReason::kOverflow);
4532 } 4517 }
4533 __ SmiTag(output, input); 4518 __ SmiTag(output, input);
4534 } 4519 }
4535 4520
4536 4521
4537 void LCodeGen::DoSmiUntag(LSmiUntag* instr) { 4522 void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
4538 Register input = ToRegister(instr->value()); 4523 Register input = ToRegister(instr->value());
4539 Register result = ToRegister(instr->result()); 4524 Register result = ToRegister(instr->result());
4540 Label done, untag; 4525 Label done, untag;
4541 4526
4542 if (instr->needs_check()) { 4527 if (instr->needs_check()) {
4543 DeoptimizeIfNotSmi(input, instr, Deoptimizer::kNotASmi); 4528 DeoptimizeIfNotSmi(input, instr, DeoptimizeReason::kNotASmi);
4544 } 4529 }
4545 4530
4546 __ Bind(&untag); 4531 __ Bind(&untag);
4547 __ SmiUntag(result, input); 4532 __ SmiUntag(result, input);
4548 __ Bind(&done); 4533 __ Bind(&done);
4549 } 4534 }
4550 4535
4551 4536
4552 void LCodeGen::DoShiftI(LShiftI* instr) { 4537 void LCodeGen::DoShiftI(LShiftI* instr) {
4553 LOperand* right_op = instr->right(); 4538 LOperand* right_op = instr->right();
4554 Register left = ToRegister32(instr->left()); 4539 Register left = ToRegister32(instr->left());
4555 Register result = ToRegister32(instr->result()); 4540 Register result = ToRegister32(instr->result());
4556 4541
4557 if (right_op->IsRegister()) { 4542 if (right_op->IsRegister()) {
4558 Register right = ToRegister32(instr->right()); 4543 Register right = ToRegister32(instr->right());
4559 switch (instr->op()) { 4544 switch (instr->op()) {
4560 case Token::ROR: __ Ror(result, left, right); break; 4545 case Token::ROR: __ Ror(result, left, right); break;
4561 case Token::SAR: __ Asr(result, left, right); break; 4546 case Token::SAR: __ Asr(result, left, right); break;
4562 case Token::SHL: __ Lsl(result, left, right); break; 4547 case Token::SHL: __ Lsl(result, left, right); break;
4563 case Token::SHR: 4548 case Token::SHR:
4564 __ Lsr(result, left, right); 4549 __ Lsr(result, left, right);
4565 if (instr->can_deopt()) { 4550 if (instr->can_deopt()) {
4566 // If `left >>> right` >= 0x80000000, the result is not representable 4551 // If `left >>> right` >= 0x80000000, the result is not representable
4567 // in a signed 32-bit smi. 4552 // in a signed 32-bit smi.
4568 DeoptimizeIfNegative(result, instr, Deoptimizer::kNegativeValue); 4553 DeoptimizeIfNegative(result, instr, DeoptimizeReason::kNegativeValue);
4569 } 4554 }
4570 break; 4555 break;
4571 default: UNREACHABLE(); 4556 default: UNREACHABLE();
4572 } 4557 }
4573 } else { 4558 } else {
4574 DCHECK(right_op->IsConstantOperand()); 4559 DCHECK(right_op->IsConstantOperand());
4575 int shift_count = JSShiftAmountFromLConstant(right_op); 4560 int shift_count = JSShiftAmountFromLConstant(right_op);
4576 if (shift_count == 0) { 4561 if (shift_count == 0) {
4577 if ((instr->op() == Token::SHR) && instr->can_deopt()) { 4562 if ((instr->op() == Token::SHR) && instr->can_deopt()) {
4578 DeoptimizeIfNegative(left, instr, Deoptimizer::kNegativeValue); 4563 DeoptimizeIfNegative(left, instr, DeoptimizeReason::kNegativeValue);
4579 } 4564 }
4580 __ Mov(result, left, kDiscardForSameWReg); 4565 __ Mov(result, left, kDiscardForSameWReg);
4581 } else { 4566 } else {
4582 switch (instr->op()) { 4567 switch (instr->op()) {
4583 case Token::ROR: __ Ror(result, left, shift_count); break; 4568 case Token::ROR: __ Ror(result, left, shift_count); break;
4584 case Token::SAR: __ Asr(result, left, shift_count); break; 4569 case Token::SAR: __ Asr(result, left, shift_count); break;
4585 case Token::SHL: __ Lsl(result, left, shift_count); break; 4570 case Token::SHL: __ Lsl(result, left, shift_count); break;
4586 case Token::SHR: __ Lsr(result, left, shift_count); break; 4571 case Token::SHR: __ Lsr(result, left, shift_count); break;
4587 default: UNREACHABLE(); 4572 default: UNREACHABLE();
4588 } 4573 }
(...skipping 32 matching lines...) Expand 10 before | Expand all | Expand 10 after
4621 break; 4606 break;
4622 case Token::SHL: 4607 case Token::SHL:
4623 __ Lsl(result, left, result); 4608 __ Lsl(result, left, result);
4624 break; 4609 break;
4625 case Token::SHR: 4610 case Token::SHR:
4626 __ Lsr(result, left, result); 4611 __ Lsr(result, left, result);
4627 __ Bic(result, result, kSmiShiftMask); 4612 __ Bic(result, result, kSmiShiftMask);
4628 if (instr->can_deopt()) { 4613 if (instr->can_deopt()) {
4629 // If `left >>> right` >= 0x80000000, the result is not representable 4614 // If `left >>> right` >= 0x80000000, the result is not representable
4630 // in a signed 32-bit smi. 4615 // in a signed 32-bit smi.
4631 DeoptimizeIfNegative(result, instr, Deoptimizer::kNegativeValue); 4616 DeoptimizeIfNegative(result, instr, DeoptimizeReason::kNegativeValue);
4632 } 4617 }
4633 break; 4618 break;
4634 default: UNREACHABLE(); 4619 default: UNREACHABLE();
4635 } 4620 }
4636 } else { 4621 } else {
4637 DCHECK(right_op->IsConstantOperand()); 4622 DCHECK(right_op->IsConstantOperand());
4638 int shift_count = JSShiftAmountFromLConstant(right_op); 4623 int shift_count = JSShiftAmountFromLConstant(right_op);
4639 if (shift_count == 0) { 4624 if (shift_count == 0) {
4640 if ((instr->op() == Token::SHR) && instr->can_deopt()) { 4625 if ((instr->op() == Token::SHR) && instr->can_deopt()) {
4641 DeoptimizeIfNegative(left, instr, Deoptimizer::kNegativeValue); 4626 DeoptimizeIfNegative(left, instr, DeoptimizeReason::kNegativeValue);
4642 } 4627 }
4643 __ Mov(result, left); 4628 __ Mov(result, left);
4644 } else { 4629 } else {
4645 switch (instr->op()) { 4630 switch (instr->op()) {
4646 case Token::ROR: 4631 case Token::ROR:
4647 __ SmiUntag(result, left); 4632 __ SmiUntag(result, left);
4648 __ Ror(result.W(), result.W(), shift_count); 4633 __ Ror(result.W(), result.W(), shift_count);
4649 __ SmiTag(result); 4634 __ SmiTag(result);
4650 break; 4635 break;
4651 case Token::SAR: 4636 case Token::SAR:
(...skipping 110 matching lines...) Expand 10 before | Expand all | Expand 10 after
4762 Register value = ToRegister(instr->value()); 4747 Register value = ToRegister(instr->value());
4763 Register scratch = ToRegister(instr->temp()); 4748 Register scratch = ToRegister(instr->temp());
4764 MemOperand target = ContextMemOperand(context, instr->slot_index()); 4749 MemOperand target = ContextMemOperand(context, instr->slot_index());
4765 4750
4766 Label skip_assignment; 4751 Label skip_assignment;
4767 4752
4768 if (instr->hydrogen()->RequiresHoleCheck()) { 4753 if (instr->hydrogen()->RequiresHoleCheck()) {
4769 __ Ldr(scratch, target); 4754 __ Ldr(scratch, target);
4770 if (instr->hydrogen()->DeoptimizesOnHole()) { 4755 if (instr->hydrogen()->DeoptimizesOnHole()) {
4771 DeoptimizeIfRoot(scratch, Heap::kTheHoleValueRootIndex, instr, 4756 DeoptimizeIfRoot(scratch, Heap::kTheHoleValueRootIndex, instr,
4772 Deoptimizer::kHole); 4757 DeoptimizeReason::kHole);
4773 } else { 4758 } else {
4774 __ JumpIfNotRoot(scratch, Heap::kTheHoleValueRootIndex, &skip_assignment); 4759 __ JumpIfNotRoot(scratch, Heap::kTheHoleValueRootIndex, &skip_assignment);
4775 } 4760 }
4776 } 4761 }
4777 4762
4778 __ Str(value, target); 4763 __ Str(value, target);
4779 if (instr->hydrogen()->NeedsWriteBarrier()) { 4764 if (instr->hydrogen()->NeedsWriteBarrier()) {
4780 SmiCheck check_needed = 4765 SmiCheck check_needed =
4781 instr->hydrogen()->value()->type().IsHeapObject() 4766 instr->hydrogen()->value()->type().IsHeapObject()
4782 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; 4767 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
(...skipping 253 matching lines...) Expand 10 before | Expand all | Expand 10 after
5036 5021
5037 GrowArrayElementsStub stub(isolate(), instr->hydrogen()->is_js_array(), 5022 GrowArrayElementsStub stub(isolate(), instr->hydrogen()->is_js_array(),
5038 instr->hydrogen()->kind()); 5023 instr->hydrogen()->kind());
5039 __ CallStub(&stub); 5024 __ CallStub(&stub);
5040 RecordSafepointWithLazyDeopt( 5025 RecordSafepointWithLazyDeopt(
5041 instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); 5026 instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
5042 __ StoreToSafepointRegisterSlot(result, result); 5027 __ StoreToSafepointRegisterSlot(result, result);
5043 } 5028 }
5044 5029
5045 // Deopt on smi, which means the elements array changed to dictionary mode. 5030 // Deopt on smi, which means the elements array changed to dictionary mode.
5046 DeoptimizeIfSmi(result, instr, Deoptimizer::kSmi); 5031 DeoptimizeIfSmi(result, instr, DeoptimizeReason::kSmi);
5047 } 5032 }
5048 5033
5049 5034
5050 void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) { 5035 void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
5051 Representation representation = instr->representation(); 5036 Representation representation = instr->representation();
5052 5037
5053 Register object = ToRegister(instr->object()); 5038 Register object = ToRegister(instr->object());
5054 HObjectAccess access = instr->hydrogen()->access(); 5039 HObjectAccess access = instr->hydrogen()->access();
5055 int offset = access.offset(); 5040 int offset = access.offset();
5056 5041
(...skipping 221 matching lines...) Expand 10 before | Expand all | Expand 10 after
5278 5263
5279 5264
5280 void LCodeGen::DoSubI(LSubI* instr) { 5265 void LCodeGen::DoSubI(LSubI* instr) {
5281 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); 5266 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
5282 Register result = ToRegister32(instr->result()); 5267 Register result = ToRegister32(instr->result());
5283 Register left = ToRegister32(instr->left()); 5268 Register left = ToRegister32(instr->left());
5284 Operand right = ToShiftedRightOperand32(instr->right(), instr); 5269 Operand right = ToShiftedRightOperand32(instr->right(), instr);
5285 5270
5286 if (can_overflow) { 5271 if (can_overflow) {
5287 __ Subs(result, left, right); 5272 __ Subs(result, left, right);
5288 DeoptimizeIf(vs, instr, Deoptimizer::kOverflow); 5273 DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow);
5289 } else { 5274 } else {
5290 __ Sub(result, left, right); 5275 __ Sub(result, left, right);
5291 } 5276 }
5292 } 5277 }
5293 5278
5294 5279
5295 void LCodeGen::DoSubS(LSubS* instr) { 5280 void LCodeGen::DoSubS(LSubS* instr) {
5296 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); 5281 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
5297 Register result = ToRegister(instr->result()); 5282 Register result = ToRegister(instr->result());
5298 Register left = ToRegister(instr->left()); 5283 Register left = ToRegister(instr->left());
5299 Operand right = ToOperand(instr->right()); 5284 Operand right = ToOperand(instr->right());
5300 if (can_overflow) { 5285 if (can_overflow) {
5301 __ Subs(result, left, right); 5286 __ Subs(result, left, right);
5302 DeoptimizeIf(vs, instr, Deoptimizer::kOverflow); 5287 DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow);
5303 } else { 5288 } else {
5304 __ Sub(result, left, right); 5289 __ Sub(result, left, right);
5305 } 5290 }
5306 } 5291 }
5307 5292
5308 5293
5309 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, 5294 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr,
5310 LOperand* value, 5295 LOperand* value,
5311 LOperand* temp1, 5296 LOperand* temp1,
5312 LOperand* temp2) { 5297 LOperand* temp2) {
(...skipping 20 matching lines...) Expand all
5333 Register false_root = scratch1; 5318 Register false_root = scratch1;
5334 __ LoadTrueFalseRoots(true_root, false_root); 5319 __ LoadTrueFalseRoots(true_root, false_root);
5335 __ Cmp(input, true_root); 5320 __ Cmp(input, true_root);
5336 __ Cset(output, eq); 5321 __ Cset(output, eq);
5337 __ Ccmp(input, false_root, ZFlag, ne); 5322 __ Ccmp(input, false_root, ZFlag, ne);
5338 __ B(eq, &done); 5323 __ B(eq, &done);
5339 5324
5340 // Output contains zero, undefined is converted to zero for truncating 5325 // Output contains zero, undefined is converted to zero for truncating
5341 // conversions. 5326 // conversions.
5342 DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr, 5327 DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr,
5343 Deoptimizer::kNotAHeapNumberUndefinedBoolean); 5328 DeoptimizeReason::kNotAHeapNumberUndefinedBoolean);
5344 } else { 5329 } else {
5345 Register output = ToRegister32(instr->result()); 5330 Register output = ToRegister32(instr->result());
5346 DoubleRegister dbl_scratch2 = ToDoubleRegister(temp2); 5331 DoubleRegister dbl_scratch2 = ToDoubleRegister(temp2);
5347 5332
5348 DeoptimizeIfNotHeapNumber(input, instr); 5333 DeoptimizeIfNotHeapNumber(input, instr);
5349 5334
5350 // A heap number: load value and convert to int32 using non-truncating 5335 // A heap number: load value and convert to int32 using non-truncating
5351 // function. If the result is out of range, branch to deoptimize. 5336 // function. If the result is out of range, branch to deoptimize.
5352 __ Ldr(dbl_scratch1, FieldMemOperand(input, HeapNumber::kValueOffset)); 5337 __ Ldr(dbl_scratch1, FieldMemOperand(input, HeapNumber::kValueOffset));
5353 __ TryRepresentDoubleAsInt32(output, dbl_scratch1, dbl_scratch2); 5338 __ TryRepresentDoubleAsInt32(output, dbl_scratch1, dbl_scratch2);
5354 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN); 5339 DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN);
5355 5340
5356 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 5341 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
5357 __ Cmp(output, 0); 5342 __ Cmp(output, 0);
5358 __ B(ne, &done); 5343 __ B(ne, &done);
5359 __ Fmov(scratch1, dbl_scratch1); 5344 __ Fmov(scratch1, dbl_scratch1);
5360 DeoptimizeIfNegative(scratch1, instr, Deoptimizer::kMinusZero); 5345 DeoptimizeIfNegative(scratch1, instr, DeoptimizeReason::kMinusZero);
5361 } 5346 }
5362 } 5347 }
5363 __ Bind(&done); 5348 __ Bind(&done);
5364 } 5349 }
5365 5350
5366 5351
5367 void LCodeGen::DoTaggedToI(LTaggedToI* instr) { 5352 void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
5368 class DeferredTaggedToI: public LDeferredCode { 5353 class DeferredTaggedToI: public LDeferredCode {
5369 public: 5354 public:
5370 DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr) 5355 DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
(...skipping 69 matching lines...) Expand 10 before | Expand all | Expand 10 after
5440 } 5425 }
5441 5426
5442 5427
5443 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) { 5428 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
5444 Register object = ToRegister(instr->object()); 5429 Register object = ToRegister(instr->object());
5445 Register temp1 = ToRegister(instr->temp1()); 5430 Register temp1 = ToRegister(instr->temp1());
5446 Register temp2 = ToRegister(instr->temp2()); 5431 Register temp2 = ToRegister(instr->temp2());
5447 5432
5448 Label no_memento_found; 5433 Label no_memento_found;
5449 __ TestJSArrayForAllocationMemento(object, temp1, temp2, &no_memento_found); 5434 __ TestJSArrayForAllocationMemento(object, temp1, temp2, &no_memento_found);
5450 DeoptimizeIf(eq, instr, Deoptimizer::kMementoFound); 5435 DeoptimizeIf(eq, instr, DeoptimizeReason::kMementoFound);
5451 __ Bind(&no_memento_found); 5436 __ Bind(&no_memento_found);
5452 } 5437 }
5453 5438
5454 5439
5455 void LCodeGen::DoTruncateDoubleToIntOrSmi(LTruncateDoubleToIntOrSmi* instr) { 5440 void LCodeGen::DoTruncateDoubleToIntOrSmi(LTruncateDoubleToIntOrSmi* instr) {
5456 DoubleRegister input = ToDoubleRegister(instr->value()); 5441 DoubleRegister input = ToDoubleRegister(instr->value());
5457 Register result = ToRegister(instr->result()); 5442 Register result = ToRegister(instr->result());
5458 __ TruncateDoubleToI(result, input); 5443 __ TruncateDoubleToI(result, input);
5459 if (instr->tag_result()) { 5444 if (instr->tag_result()) {
5460 __ SmiTag(result, result); 5445 __ SmiTag(result, result);
(...skipping 125 matching lines...) Expand 10 before | Expand all | Expand 10 after
5586 __ Ucvtf(ToDoubleRegister(instr->result()), ToRegister32(instr->value())); 5571 __ Ucvtf(ToDoubleRegister(instr->result()), ToRegister32(instr->value()));
5587 } 5572 }
5588 5573
5589 5574
5590 void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) { 5575 void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
5591 Register object = ToRegister(instr->value()); 5576 Register object = ToRegister(instr->value());
5592 Register map = ToRegister(instr->map()); 5577 Register map = ToRegister(instr->map());
5593 Register temp = ToRegister(instr->temp()); 5578 Register temp = ToRegister(instr->temp());
5594 __ Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset)); 5579 __ Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
5595 __ Cmp(map, temp); 5580 __ Cmp(map, temp);
5596 DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap); 5581 DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongMap);
5597 } 5582 }
5598 5583
5599 5584
5600 void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) { 5585 void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
5601 Register receiver = ToRegister(instr->receiver()); 5586 Register receiver = ToRegister(instr->receiver());
5602 Register function = ToRegister(instr->function()); 5587 Register function = ToRegister(instr->function());
5603 Register result = ToRegister(instr->result()); 5588 Register result = ToRegister(instr->result());
5604 5589
5605 // If the receiver is null or undefined, we have to pass the global object as 5590 // If the receiver is null or undefined, we have to pass the global object as
5606 // a receiver to normal functions. Values have to be passed unchanged to 5591 // a receiver to normal functions. Values have to be passed unchanged to
(...skipping 13 matching lines...) Expand all
5620 5605
5621 // Do not transform the receiver to object for builtins. 5606 // Do not transform the receiver to object for builtins.
5622 __ Tbnz(result, SharedFunctionInfo::kNative, &copy_receiver); 5607 __ Tbnz(result, SharedFunctionInfo::kNative, &copy_receiver);
5623 } 5608 }
5624 5609
5625 // Normal function. Replace undefined or null with global receiver. 5610 // Normal function. Replace undefined or null with global receiver.
5626 __ JumpIfRoot(receiver, Heap::kNullValueRootIndex, &global_object); 5611 __ JumpIfRoot(receiver, Heap::kNullValueRootIndex, &global_object);
5627 __ JumpIfRoot(receiver, Heap::kUndefinedValueRootIndex, &global_object); 5612 __ JumpIfRoot(receiver, Heap::kUndefinedValueRootIndex, &global_object);
5628 5613
5629 // Deoptimize if the receiver is not a JS object. 5614 // Deoptimize if the receiver is not a JS object.
5630 DeoptimizeIfSmi(receiver, instr, Deoptimizer::kSmi); 5615 DeoptimizeIfSmi(receiver, instr, DeoptimizeReason::kSmi);
5631 __ CompareObjectType(receiver, result, result, FIRST_JS_RECEIVER_TYPE); 5616 __ CompareObjectType(receiver, result, result, FIRST_JS_RECEIVER_TYPE);
5632 __ B(ge, &copy_receiver); 5617 __ B(ge, &copy_receiver);
5633 Deoptimize(instr, Deoptimizer::kNotAJavaScriptObject); 5618 Deoptimize(instr, DeoptimizeReason::kNotAJavaScriptObject);
5634 5619
5635 __ Bind(&global_object); 5620 __ Bind(&global_object);
5636 __ Ldr(result, FieldMemOperand(function, JSFunction::kContextOffset)); 5621 __ Ldr(result, FieldMemOperand(function, JSFunction::kContextOffset));
5637 __ Ldr(result, ContextMemOperand(result, Context::NATIVE_CONTEXT_INDEX)); 5622 __ Ldr(result, ContextMemOperand(result, Context::NATIVE_CONTEXT_INDEX));
5638 __ Ldr(result, ContextMemOperand(result, Context::GLOBAL_PROXY_INDEX)); 5623 __ Ldr(result, ContextMemOperand(result, Context::GLOBAL_PROXY_INDEX));
5639 __ B(&done); 5624 __ B(&done);
5640 5625
5641 __ Bind(&copy_receiver); 5626 __ Bind(&copy_receiver);
5642 __ Mov(result, receiver); 5627 __ Mov(result, receiver);
5643 __ Bind(&done); 5628 __ Bind(&done);
(...skipping 70 matching lines...) Expand 10 before | Expand all | Expand 10 after
5714 // Index is equal to negated out of object property index plus 1. 5699 // Index is equal to negated out of object property index plus 1.
5715 __ Sub(result, result, Operand::UntagSmiAndScale(index, kPointerSizeLog2)); 5700 __ Sub(result, result, Operand::UntagSmiAndScale(index, kPointerSizeLog2));
5716 __ Ldr(result, FieldMemOperand(result, 5701 __ Ldr(result, FieldMemOperand(result,
5717 FixedArray::kHeaderSize - kPointerSize)); 5702 FixedArray::kHeaderSize - kPointerSize));
5718 __ Bind(deferred->exit()); 5703 __ Bind(deferred->exit());
5719 __ Bind(&done); 5704 __ Bind(&done);
5720 } 5705 }
5721 5706
5722 } // namespace internal 5707 } // namespace internal
5723 } // namespace v8 5708 } // namespace v8
OLDNEW
« no previous file with comments | « src/crankshaft/arm64/lithium-codegen-arm64.h ('k') | src/crankshaft/hydrogen.h » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698