Chromium Code Reviews

Unified Diff: src/a64/lithium-codegen-a64.cc

Issue 204293004: A64: Remove Operand constructors where an implicit constructor can be used. (Closed)
Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Created 6 years, 9 months ago
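The whole CL hinges on one C++ feature: the A64 `Operand` class declares single-argument constructors that are not marked `explicit`, so the compiler can wrap a Smi, an `ExternalReference`, an immediate, or a register in an `Operand` automatically at any call site that expects one. The following is a minimal compiling sketch of that pattern, with simplified stand-in types rather than the real V8 declarations:

#include <cstdint>

// Simplified stand-ins for the real V8 types.
struct Smi { int value; };
struct ExternalReference { const void* address; };

class Operand {
 public:
  // Deliberately non-explicit: each constructor enables an implicit
  // conversion from the raw type to Operand at the call site.
  Operand(int64_t immediate) : immediate_(immediate) {}  // NOLINT
  Operand(Smi smi) : immediate_(smi.value) {}            // NOLINT
  Operand(ExternalReference ref)                         // NOLINT
      : immediate_(reinterpret_cast<int64_t>(ref.address)) {}

 private:
  int64_t immediate_;
};

// Assembler-style interface: macro instructions take an Operand.
void Mov(int dst, const Operand& operand) { (void)dst; (void)operand; }

void Demo() {
  Smi marker = {2};
  Mov(0, Operand(marker));  // Before this CL: explicit wrapping.
  Mov(0, marker);           // After: the implicit constructor kicks in.
  Mov(0, 42);               // Immediates convert the same way.
}

Every hunk below is the same mechanical rewrite: drop the `Operand(...)` wrapper wherever overload resolution can insert it implicitly. Calls like `Operand::UntagSmiAndScale(...)` in the last hunk still spell out `Operand`, because there the operand encodes a real untag-and-shift operation, not a plain conversion.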
 // Copyright 2013 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 783 matching lines...)
             code->instr()->Mnemonic());

     __ Bind(code->entry());

     if (NeedsDeferredFrame()) {
       Comment(";;; Build frame");
       ASSERT(!frame_is_built_);
       ASSERT(info()->IsStub());
       frame_is_built_ = true;
       __ Push(lr, fp, cp);
-      __ Mov(fp, Operand(Smi::FromInt(StackFrame::STUB)));
+      __ Mov(fp, Smi::FromInt(StackFrame::STUB));
       __ Push(fp);
       __ Add(fp, __ StackPointer(),
              StandardFrameConstants::kFixedFrameSizeFromFp);
       Comment(";;; Deferred code");
     }

     code->Generate();

     if (NeedsDeferredFrame()) {
       Comment(";;; Destroy frame");
(...skipping 33 matching lines...)
     } else {
       Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
     }
     if (deopt_jump_table_[i]->needs_frame) {
       ASSERT(!info()->saves_caller_doubles());

       UseScratchRegisterScope temps(masm());
       Register stub_deopt_entry = temps.AcquireX();
       Register stub_marker = temps.AcquireX();

-      __ Mov(stub_deopt_entry,
-             Operand(ExternalReference::ForDeoptEntry(entry)));
+      __ Mov(stub_deopt_entry, ExternalReference::ForDeoptEntry(entry));
       if (needs_frame.is_bound()) {
         __ B(&needs_frame);
       } else {
         __ Bind(&needs_frame);
         // This variant of deopt can only be used with stubs. Since we don't
         // have a function pointer to install in the stack frame that we're
         // building, install a special marker there instead.
         ASSERT(info()->IsStub());
-        __ Mov(stub_marker, Operand(Smi::FromInt(StackFrame::STUB)));
+        __ Mov(stub_marker, Smi::FromInt(StackFrame::STUB));
         __ Push(lr, fp, cp, stub_marker);
         __ Add(fp, __ StackPointer(), 2 * kPointerSize);
         __ Call(stub_deopt_entry);
       }
     } else {
       if (info()->saves_caller_doubles()) {
         ASSERT(info()->IsStub());
         RestoreCallerDoubles();
       }
       __ Call(entry, RelocInfo::RUNTIME_ENTRY);
(...skipping 119 matching lines...)
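Why a Smi works as a frame marker here: the slot that would normally hold the frame's JSFunction receives `Smi::FromInt(StackFrame::STUB)` instead, and the stack walker can always tell the two apart because Smis and heap pointers occupy disjoint bit patterns. A sketch of that distinction, with a simplified tagging scheme (the real tag and shift widths depend on the build's Smi representation):

#include <cstdint>

// Illustrative tagging scheme: heap-object pointers carry a low tag
// bit, Smis have it clear and hold their payload in the upper bits.
constexpr uintptr_t kHeapObjectTagMask = 1;

bool SlotHoldsSmiMarker(uintptr_t slot) {
  return (slot & kHeapObjectTagMask) == 0;  // Smi => marker, not a function.
}

int32_t SmiMarkerValue(uintptr_t slot) {
  // Smi::FromInt(n) stores n shifted left past the tag bit(s); shifting
  // back recovers the StackFrame::Type value (shift width simplified).
  return static_cast<int32_t>(static_cast<intptr_t>(slot) >> 1);
}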
   if (entry == NULL) {
     Abort(kBailoutWasNotPrepared);
   }

   if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
     Label not_zero;
     ExternalReference count = ExternalReference::stress_deopt_count(isolate());

     __ Push(x0, x1, x2);
     __ Mrs(x2, NZCV);
-    __ Mov(x0, Operand(count));
+    __ Mov(x0, count);
     __ Ldr(w1, MemOperand(x0));
     __ Subs(x1, x1, 1);
     __ B(gt, &not_zero);
     __ Mov(w1, FLAG_deopt_every_n_times);
     __ Str(w1, MemOperand(x0));
     __ Pop(x2, x1, x0);
     ASSERT(frame_is_built_);
     __ Call(entry, RelocInfo::RUNTIME_ENTRY);
     __ Unreachable();

(...skipping 526 matching lines...)
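The stress-deopt block above is a plain countdown: each check decrements the counter behind `ExternalReference::stress_deopt_count`, and when it reaches zero the code resets it to `FLAG_deopt_every_n_times` and forces a deoptimization. A C-level rendering of the control flow (a sketch only; the generated code must additionally preserve x0-x2 and the NZCV condition flags, which is what the surrounding Push/Mrs/Pop are for):

#include <cstdint>

int32_t stress_deopt_count = 0;     // stands in for the ExternalReference
int32_t deopt_every_n_times = 100;  // stands in for FLAG_deopt_every_n_times

bool ShouldForceDeopt() {
  if (--stress_deopt_count > 0) {            // Ldr / Subs / B(gt, &not_zero)
    return false;                            // not_zero: continue normally
  }
  stress_deopt_count = deopt_every_n_times;  // Mov / Str: reset the counter
  return true;                               // fall through to Call(entry, ...)
}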
   } else {
     ASSERT(instr->temp3() == NULL);
   }
 }


 void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
   // TODO(3095996): Get rid of this. For now, we need to make the
   // result register contain a valid pointer because it is already
   // contained in the register pointer map.
-  __ Mov(ToRegister(instr->result()), Operand(Smi::FromInt(0)));
+  __ Mov(ToRegister(instr->result()), Smi::FromInt(0));

   PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
   // We're in a SafepointRegistersScope so we can use any scratch registers.
   Register size = x0;
   if (instr->size()->IsConstantOperand()) {
-    __ Mov(size, Operand(ToSmi(LConstantOperand::cast(instr->size()))));
+    __ Mov(size, ToSmi(LConstantOperand::cast(instr->size())));
   } else {
     __ SmiTag(size, ToRegister32(instr->size()).X());
   }
   int flags = AllocateDoubleAlignFlag::encode(
       instr->hydrogen()->MustAllocateDoubleAligned());
   if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
     ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
     ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
     flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE);
   } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
     ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
     flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE);
   } else {
     flags = AllocateTargetSpace::update(flags, NEW_SPACE);
   }
-  __ Mov(x10, Operand(Smi::FromInt(flags)));
+  __ Mov(x10, Smi::FromInt(flags));
   __ Push(size, x10);

   CallRuntimeFromDeferred(
       Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
   __ StoreToSafepointRegisterSlot(x0, ToRegister(instr->result()));
 }


 void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
   Register receiver = ToRegister(instr->receiver());
(...skipping 59 matching lines...)
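`AllocateDoubleAlignFlag::encode` and `AllocateTargetSpace::update` pack two independent fields into the single integer that gets Smi-tagged into x10 and pushed as a runtime-call argument. They follow V8's `BitField` idiom; here is a self-contained sketch with illustrative (not actual) field positions:

#include <cstdint>

// Sketch of the BitField idiom behind the allocation flags above.
template <typename T, int shift, int size>
struct BitField {
  static constexpr uint32_t kMask = ((1u << size) - 1) << shift;
  static uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << shift;
  }
  static uint32_t update(uint32_t previous, T value) {
    return (previous & ~kMask) | encode(value);
  }
};

enum AllocationSpace { NEW_SPACE, OLD_POINTER_SPACE, OLD_DATA_SPACE };
using AllocateDoubleAlignFlag = BitField<bool, 0, 1>;
using AllocateTargetSpace = BitField<AllocationSpace, 1, 2>;

// Usage mirroring DoDeferredAllocate: the packed word is then
// Smi-tagged and pushed for Runtime::kAllocateInTargetSpace.
uint32_t MakeFlags(bool double_align) {
  uint32_t flags = AllocateDoubleAlignFlag::encode(double_align);
  flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE);
  return flags;
}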
     ASSERT(masm()->StackPointer().Is(jssp));
     __ Sub(result, jssp, 2 * kPointerSize);
   } else {
     ASSERT(instr->temp() != NULL);
     Register previous_fp = ToRegister(instr->temp());

     __ Ldr(previous_fp,
            MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
     __ Ldr(result,
            MemOperand(previous_fp, StandardFrameConstants::kContextOffset));
-    __ Cmp(result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+    __ Cmp(result, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
     __ Csel(result, fp, previous_fp, ne);
   }
 }


 void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
   Register elements = ToRegister(instr->elements());
   Register result = ToRegister32(instr->result());
   Label done;

(...skipping 104 matching lines...)
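The `Cmp`/`Csel` pair above is a branch-free select: after comparing the caller's context slot against the arguments-adaptor marker, `Csel(result, fp, previous_fp, ne)` picks `fp` when the marker did not match and `previous_fp` when it did. In C terms (a hypothetical helper, not a V8 function):

#include <cstdint>

// Csel(result, fp, previous_fp, ne) after Cmp(context_slot, marker):
uintptr_t SelectArgumentsFrame(uintptr_t fp, uintptr_t previous_fp,
                               uintptr_t context_slot,
                               uintptr_t adaptor_marker) {
  return (context_slot != adaptor_marker) ? fp : previous_fp;
}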
   if (instr->hydrogen()->skip_check()) return;

   ASSERT(instr->hydrogen()->length()->representation().IsInteger32());
   Register length = ToRegister32(instr->length());

   if (instr->index()->IsConstantOperand()) {
     int constant_index =
         ToInteger32(LConstantOperand::cast(instr->index()));

     if (instr->hydrogen()->length()->representation().IsSmi()) {
-      __ Cmp(length, Operand(Smi::FromInt(constant_index)));
+      __ Cmp(length, Smi::FromInt(constant_index));
     } else {
-      __ Cmp(length, Operand(constant_index));
+      __ Cmp(length, constant_index);
     }
   } else {
     ASSERT(instr->hydrogen()->index()->representation().IsInteger32());
     __ Cmp(length, ToRegister32(instr->index()));
   }
   Condition condition = instr->hydrogen()->allow_equality() ? lo : ls;
   ApplyCheckIf(condition, instr);
 }

(...skipping 17 matching lines...)
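Note the operand order in this bounds check: it compares `length` against `index`, then deoptimizes on an unsigned condition, `lo` (length below index) when `index == length` is allowed and `ls` (length below or same) when it is not. Unsigned conditions also catch negative indices for free, since their two's-complement bit patterns compare as huge unsigned values. A sketch of the predicate (hypothetical helper name):

#include <cstdint>

bool BoundsCheckShouldDeopt(int32_t length, int32_t index,
                            bool allow_equality) {
  uint32_t ulength = static_cast<uint32_t>(length);
  uint32_t uindex = static_cast<uint32_t>(index);
  return allow_equality ? (ulength < uindex)    // condition lo
                        : (ulength <= uindex);  // condition ls
}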
   ASSERT(r.IsTagged());
   Register value = ToRegister(instr->value());
   HType type = instr->hydrogen()->value()->type();

   if (type.IsBoolean()) {
     ASSERT(!info()->IsStub());
     __ CompareRoot(value, Heap::kTrueValueRootIndex);
     EmitBranch(instr, eq);
   } else if (type.IsSmi()) {
     ASSERT(!info()->IsStub());
-    EmitCompareAndBranch(instr, ne, value, Operand(Smi::FromInt(0)));
+    EmitCompareAndBranch(instr, ne, value, Smi::FromInt(0));
   } else if (type.IsJSArray()) {
     ASSERT(!info()->IsStub());
     EmitGoto(instr->TrueDestination(chunk()));
   } else if (type.IsHeapNumber()) {
     ASSERT(!info()->IsStub());
     __ Ldr(double_scratch(), FieldMemOperand(value,
                                              HeapNumber::kValueOffset));
     // Test the double value. Zero and NaN are false.
     EmitBranchIfNonZeroNumber(instr, double_scratch(), double_scratch());
   } else if (type.IsString()) {
(...skipping 1189 matching lines...)
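The heap-number arm implements JavaScript ToBoolean for doubles, as the comment says: +0, -0 and NaN are the only falsy doubles. The Fcmp-based `EmitBranchIfNonZeroNumber` boils down to this predicate (a sketch, not the V8 helper):

#include <cmath>

bool DoubleIsTruthy(double value) {
  // NaN fails the isnan test; +0.0 and -0.0 fail the inequality.
  return !std::isnan(value) && value != 0.0;
}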
   {
     // Below we use Factory::the_hole_value() on purpose instead of loading
     // from the root array to force relocation and later be able to patch
     // with a custom value.
     InstructionAccurateScope scope(masm(), 5);
     __ bind(&map_check);
     // Will be patched with the cached map.
     Handle<Cell> cell = factory()->NewCell(factory()->the_hole_value());
     __ LoadRelocated(scratch, Operand(Handle<Object>(cell)));
     __ ldr(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
-    __ cmp(map, Operand(scratch));
+    __ cmp(map, scratch);
     __ b(&cache_miss, ne);
     // The address of this instruction is computed relative to the map check
     // above, so check the size of the code generated.
     ASSERT(masm()->InstructionsGeneratedSince(&map_check) == 4);
     // Will be patched with the cached result.
     __ LoadRelocated(result, Operand(factory()->the_hole_value()));
   }
   __ B(&done);

   // The inlined call site cache did not match.
(...skipping 91 matching lines...)
 void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
   Register temp1 = ToRegister(instr->temp1());
   Register temp2 = ToRegister(instr->temp2());

   // Get the frame pointer for the calling frame.
   __ Ldr(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));

   // Skip the arguments adaptor frame if it exists.
   Label check_frame_marker;
   __ Ldr(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset));
-  __ Cmp(temp2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ Cmp(temp2, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
   __ B(ne, &check_frame_marker);
   __ Ldr(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset));

   // Check the marker in the calling frame.
   __ Bind(&check_frame_marker);
   __ Ldr(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset));

   EmitCompareAndBranch(
       instr, eq, temp1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
 }
(...skipping 973 matching lines...)
   // indicate that positive dividends are heavily favored, so the branching
   // version performs better.
   HMod* hmod = instr->hydrogen();
   int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
   Label dividend_is_not_negative, done;
   if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
     __ Cmp(dividend, 0);
     __ B(pl, &dividend_is_not_negative);
     // Note that this is correct even for kMinInt operands.
     __ Neg(dividend, dividend);
-    __ And(dividend, dividend, Operand(mask));
+    __ And(dividend, dividend, mask);
     __ Negs(dividend, dividend);
     if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
       DeoptimizeIf(eq, instr->environment());
     }
     __ B(&done);
   }

   __ bind(&dividend_is_not_negative);
-  __ And(dividend, dividend, Operand(mask));
+  __ And(dividend, dividend, mask);
   __ bind(&done);
 }


 void LCodeGen::DoModByConstI(LModByConstI* instr) {
   Register dividend = ToRegister32(instr->dividend());
   int32_t divisor = instr->divisor();
   Register result = ToRegister32(instr->result());
   Register temp = ToRegister32(instr->temp());
   ASSERT(!AreAliased(dividend, result, temp));
(...skipping 709 matching lines...)
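The power-of-two modulus above reduces to bit masking: for |divisor| == 2^k, the remainder of truncated division is just the dividend's low k bits, carrying the dividend's sign. The mask is |divisor| - 1, written as `-(divisor + 1)` on the negative side so that divisor == kMinInt, where `-divisor` would overflow, still yields the right mask (kMaxInt). The trailing `Negs` sets flags so the minus-zero bailout can catch a negative dividend whose remainder is zero. A C rendering of the same arithmetic (a sketch; unsigned negation stands in for the Neg instruction to avoid C++ signed-overflow UB):

#include <cstdint>

int32_t ModByPowerOf2(int32_t dividend, int32_t divisor) {
  int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
  if (dividend < 0) {
    // Neg / And / Negs: negate, keep the low bits, negate back.
    // Correct even for dividend == kMinInt, whose low k bits are zero.
    uint32_t negated = 0u - static_cast<uint32_t>(dividend);
    return -static_cast<int32_t>(negated & static_cast<uint32_t>(mask));
  }
  return dividend & mask;  // the dividend_is_not_negative path
}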
 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
   ASSERT(ToRegister(instr->context()).is(cp));
   Register scratch1 = x5;
   Register scratch2 = x6;
   ASSERT(instr->IsMarkedAsCall());

   ASM_UNIMPLEMENTED_BREAK("DoDeclareGlobals");
   // TODO(all): if Mov could handle object in new space then it could be used
   // here.
   __ LoadHeapObject(scratch1, instr->hydrogen()->pairs());
-  __ Mov(scratch2, Operand(Smi::FromInt(instr->hydrogen()->flags())));
+  __ Mov(scratch2, Smi::FromInt(instr->hydrogen()->flags()));
   __ Push(cp, scratch1, scratch2);  // The context is the first argument.
   CallRuntime(Runtime::kDeclareGlobals, 3, instr);
 }


 void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
   PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
   LoadContextFromDeferred(instr->context());
   __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
   RecordSafepointWithLazyDeopt(
(...skipping 692 matching lines...)
   __ Mov(x1, x0);

   __ Bind(&materialized);
   int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
   Label allocated, runtime_allocate;

   __ Allocate(size, x0, x10, x11, &runtime_allocate, TAG_OBJECT);
   __ B(&allocated);

   __ Bind(&runtime_allocate);
-  __ Mov(x0, Operand(Smi::FromInt(size)));
+  __ Mov(x0, Smi::FromInt(size));
   __ Push(x1, x0);
   CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
   __ Pop(x1);

   __ Bind(&allocated);
   // Copy the content into the newly allocated memory.
   __ CopyFields(x0, x1, CPURegList(x10, x11, x12), size / kPointerSize);
 }

(...skipping 213 matching lines...)


 void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
   Register object = ToRegister(instr->object());
   Register index = ToRegister(instr->index());
   Register result = ToRegister(instr->result());

   __ AssertSmi(index);

   Label out_of_object, done;
-  __ Cmp(index, Operand(Smi::FromInt(0)));
+  __ Cmp(index, Smi::FromInt(0));
   __ B(lt, &out_of_object);

   STATIC_ASSERT(kPointerSizeLog2 > kSmiTagSize);
   __ Add(result, object, Operand::UntagSmiAndScale(index, kPointerSizeLog2));
   __ Ldr(result, FieldMemOperand(result, JSObject::kHeaderSize));

   __ B(&done);

   __ Bind(&out_of_object);
   __ Ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
   // Index is equal to negated out of object property index plus 1.
   __ Sub(result, result, Operand::UntagSmiAndScale(index, kPointerSizeLog2));
   __ Ldr(result, FieldMemOperand(result,
                                  FixedArray::kHeaderSize - kPointerSize));
   __ Bind(&done);
 }

 } }  // namespace v8::internal
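The terse comment in `DoLoadFieldByIndex` ("negated out of object property index plus 1") describes the index encoding: a non-negative Smi index selects an in-object field, while a negative one encodes slot `-index - 1` of the external properties array. Subtracting the negative scaled index is how the code effectively adds, and the `- kPointerSize` folded into the load offset accounts for the "+ 1". A sketch of the decoding (hypothetical helper):

// index >= 0: in-object field number; index < 0: properties-array slot.
int FieldIndexToPropertiesSlot(int index) {
  return -index - 1;  // e.g. index == -1 -> slot 0, index == -2 -> slot 1
}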