Chromium Code Reviews

Unified Diff: runtime/vm/intermediate_language_ia32.cc

Issue 150063004: Support reusable boxes for Float32x4 fields (Closed)
Base URL: https://dart.googlecode.com/svn/branches/bleeding_edge/dart
Patch Set: Created 6 years, 10 months ago
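
What this CL does, in brief: the IA32 backend previously unboxed only Double instance fields; this patch adds the Float32x4 cases to StoreInstanceFieldInstr and LoadFieldInstr, and generalizes BoxFloat32x4SlowPath (its constructor now takes any Instruction) so LoadFieldInstr can share it with BoxFloat32x4Instr. The "reusable box" of the title is the store fast path: when the field already holds a Float32x4 box, the value is copied into the existing box in place (the copy_float32x4 label) instead of allocating a new one; allocation happens only while the field is still null. Below is a minimal, runnable C++ sketch of that reuse-or-allocate decision; Instance, Float32x4Box, and StoreUnboxedFloat32x4 are illustrative stand-ins, not Dart VM API.

// Hypothetical model of the store_float32x4 path, not VM code.
#include <cstdio>

struct Float32x4Box {             // stands in for a boxed Float32x4 heap object
  float v[4];
};

struct Instance {
  Float32x4Box* field = nullptr;  // nullptr plays the role of Object::null()
};

void StoreUnboxedFloat32x4(Instance* obj, const float value[4]) {
  Float32x4Box* box = obj->field;  // movl temp, FieldAddress(instance, offset)
  if (box == nullptr) {            // cmpl temp, raw_null
    box = new Float32x4Box();      // TryAllocate; allocation stub on slow path
    obj->field = box;              // StoreIntoObject (write barrier)
  }
  for (int i = 0; i < 4; ++i) {    // copy_float32x4: movups into the box
    box->v[i] = value[i];
  }
}

int main() {
  Instance obj;
  const float a[4] = {1, 2, 3, 4};
  const float b[4] = {5, 6, 7, 8};
  StoreUnboxedFloat32x4(&obj, a);  // first store allocates the box
  Float32x4Box* first = obj.field;
  StoreUnboxedFloat32x4(&obj, b);  // second store reuses it in place
  std::printf("box reused: %s\n", obj.field == first ? "yes" : "no");
  delete obj.field;
}

In a hot loop that keeps writing the same field, the box is allocated once and overwritten thereafter, which is the point of the change.
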
 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
 // for details. All rights reserved. Use of this source code is governed by a
 // BSD-style license that can be found in the LICENSE file.
 
 #include "vm/globals.h"  // Needed here to get TARGET_ARCH_IA32.
 #if defined(TARGET_ARCH_IA32)
 
 #include "vm/intermediate_language.h"
 
 #include "vm/dart_entry.h"
(...skipping 1587 matching lines...)
 
   virtual void EmitNativeCode(FlowGraphCompiler* compiler) {
     __ Comment("StoreInstanceFieldSlowPath");
     __ Bind(entry_label());
     const Code& stub =
         Code::Handle(StubCode::GetAllocationStubForClass(cls_));
     const ExternalLabel label(cls_.ToCString(), stub.EntryPoint());
 
     LocationSummary* locs = instruction_->locs();
     locs->live_registers()->Remove(locs->out());
-
     compiler->SaveLiveRegisters(locs);
     compiler->GenerateCall(Scanner::kNoSourcePos,  // No token position.
                            &label,
                            PcDescriptors::kOther,
                            locs);
     __ MoveRegister(locs->temp(0).reg(), EAX);
     compiler->RestoreLiveRegisters(locs);
-
     __ jmp(exit_label());
   }
 
  private:
   StoreInstanceFieldInstr* instruction_;
   const Class& cls_;
 };
 
 
 LocationSummary* StoreInstanceFieldInstr::MakeLocationSummary(bool opt) const {
(...skipping 37 matching lines...)
     Register temp = locs()->temp(0).reg();
     Register temp2 = locs()->temp(1).reg();
     const intptr_t cid = field().UnboxedFieldCid();
 
     if (is_initialization_) {
       const Class* cls = NULL;
       switch (cid) {
         case kDoubleCid:
           cls = &compiler->double_class();
           break;
-        // TODO(johnmccutchan): Add kFloat32x4Cid here.
+        case kFloat32x4Cid:
+          cls = &compiler->float32x4_class();
+          break;
         default:
           UNREACHABLE();
       }
+
       StoreInstanceFieldSlowPath* slow_path =
           new StoreInstanceFieldSlowPath(this, *cls);
       compiler->AddSlowPathCode(slow_path);
 
       __ TryAllocate(*cls,
                      slow_path->entry_label(),
                      Assembler::kFarJump,
                      temp,
                      temp2);
       __ Bind(slow_path->exit_label());
       __ movl(temp2, temp);
       __ StoreIntoObject(instance_reg,
                          FieldAddress(instance_reg, field().Offset()),
                          temp2);
     } else {
       __ movl(temp, FieldAddress(instance_reg, field().Offset()));
     }
     switch (cid) {
       case kDoubleCid:
-        __ movsd(FieldAddress(temp, Double::value_offset()), value);
-        // TODO(johnmccutchan): Add kFloat32x4Cid here.
+        __ Comment("UnboxedDoubleStoreInstanceFieldInstr");
+        __ movsd(FieldAddress(temp, Double::value_offset()), value);
+        break;
+      case kFloat32x4Cid:
+        __ Comment("UnboxedFloat32x4StoreInstanceFieldInstr");
+        __ movups(FieldAddress(temp, Float32x4::value_offset()), value);
         break;
       default:
         UNREACHABLE();
     }
     return;
   }
 
   if (IsPotentialUnboxedStore()) {
     Register value_reg = locs()->in(1).reg();
     Register temp = locs()->temp(0).reg();
     Register temp2 = locs()->temp(1).reg();
     FpuRegister fpu_temp = locs()->temp(2).fpu_reg();
 
     Label store_pointer;
-    Label copy_double;
     Label store_double;
+    Label store_float32x4;
 
     __ LoadObject(temp, Field::ZoneHandle(field().raw()));
 
     __ cmpl(FieldAddress(temp, Field::is_nullable_offset()),
             Immediate(kNullCid));
     __ j(EQUAL, &store_pointer);
 
     __ movzxb(temp2, FieldAddress(temp, Field::kind_bits_offset()));
     __ testl(temp2, Immediate(1 << Field::kUnboxingCandidateBit));
     __ j(ZERO, &store_pointer);
 
     __ cmpl(FieldAddress(temp, Field::guarded_cid_offset()),
             Immediate(kDoubleCid));
     __ j(EQUAL, &store_double);
 
+    __ cmpl(FieldAddress(temp, Field::guarded_cid_offset()),
+            Immediate(kFloat32x4Cid));
+    __ j(EQUAL, &store_float32x4);
+
     // Fall through.
     __ jmp(&store_pointer);
 
-    __ Bind(&store_double);
-
-    const Immediate& raw_null =
-        Immediate(reinterpret_cast<intptr_t>(Object::null()));
-    __ movl(temp, FieldAddress(instance_reg, field().Offset()));
-    __ cmpl(temp, raw_null);
-    __ j(NOT_EQUAL, &copy_double);
-
-    StoreInstanceFieldSlowPath* slow_path =
-        new StoreInstanceFieldSlowPath(this, compiler->double_class());
-    compiler->AddSlowPathCode(slow_path);
-
     if (!compiler->is_optimizing()) {
       locs()->live_registers()->Add(locs()->in(0));
       locs()->live_registers()->Add(locs()->in(1));
     }
 
-    __ TryAllocate(compiler->double_class(),
-                   slow_path->entry_label(),
-                   Assembler::kFarJump,
-                   temp,
-                   temp2);
-    __ Bind(slow_path->exit_label());
-    __ movl(temp2, temp);
-    __ StoreIntoObject(instance_reg,
-                       FieldAddress(instance_reg, field().Offset()),
-                       temp2);
-
-    __ Bind(&copy_double);
-    __ movsd(fpu_temp, FieldAddress(value_reg, Double::value_offset()));
-    __ movsd(FieldAddress(temp, Double::value_offset()), fpu_temp);
-    __ jmp(&skip_store);
+    {
+      __ Bind(&store_double);
+      Label copy_double;
+
+      StoreInstanceFieldSlowPath* slow_path =
+          new StoreInstanceFieldSlowPath(this, compiler->double_class());
+      compiler->AddSlowPathCode(slow_path);
+
+      const Immediate& raw_null =
+          Immediate(reinterpret_cast<intptr_t>(Object::null()));
+      __ movl(temp, FieldAddress(instance_reg, field().Offset()));
+      __ cmpl(temp, raw_null);
+      __ j(NOT_EQUAL, &copy_double);
+
+      __ TryAllocate(compiler->double_class(),
+                     slow_path->entry_label(),
+                     Assembler::kFarJump,
+                     temp,
+                     temp2);
+      __ Bind(slow_path->exit_label());
+      __ movl(temp2, temp);
+      __ StoreIntoObject(instance_reg,
+                         FieldAddress(instance_reg, field().Offset()),
+                         temp2);
+
+      __ Bind(&copy_double);
+      __ movsd(fpu_temp, FieldAddress(value_reg, Double::value_offset()));
+      __ movsd(FieldAddress(temp, Double::value_offset()), fpu_temp);
+      __ jmp(&skip_store);
+    }
+
+    {
+      __ Bind(&store_float32x4);
+      Label copy_float32x4;
+
+      StoreInstanceFieldSlowPath* slow_path =
+          new StoreInstanceFieldSlowPath(this, compiler->float32x4_class());
+      compiler->AddSlowPathCode(slow_path);
+
+      const Immediate& raw_null =
+          Immediate(reinterpret_cast<intptr_t>(Object::null()));
+      __ movl(temp, FieldAddress(instance_reg, field().Offset()));
+      __ cmpl(temp, raw_null);
+      __ j(NOT_EQUAL, &copy_float32x4);
+
+      __ TryAllocate(compiler->float32x4_class(),
+                     slow_path->entry_label(),
+                     Assembler::kFarJump,
+                     temp,
+                     temp2);
+      __ Bind(slow_path->exit_label());
+      __ movl(temp2, temp);
+      __ StoreIntoObject(instance_reg,
+                         FieldAddress(instance_reg, field().Offset()),
+                         temp2);
+
+      __ Bind(&copy_float32x4);
+      __ movups(fpu_temp, FieldAddress(value_reg, Float32x4::value_offset()));
+      __ movups(FieldAddress(temp, Float32x4::value_offset()), fpu_temp);
+      __ jmp(&skip_store);
+    }
     __ Bind(&store_pointer);
   }
 
   if (ShouldEmitStoreBarrier()) {
     Register value_reg = locs()->in(1).reg();
     __ StoreIntoObject(instance_reg,
                        FieldAddress(instance_reg, field().Offset()),
                        value_reg,
                        CanValueBeSmi());
   } else {
(...skipping 156 matching lines...)
     compiler->RestoreLiveRegisters(locs);
 
     __ jmp(exit_label());
   }
 
  private:
   Instruction* instruction_;
 };
 
 
+class BoxFloat32x4SlowPath : public SlowPathCode {
+ public:
+  explicit BoxFloat32x4SlowPath(Instruction* instruction)
+      : instruction_(instruction) { }
+
+  virtual void EmitNativeCode(FlowGraphCompiler* compiler) {
+    __ Comment("BoxFloat32x4SlowPath");
+    __ Bind(entry_label());
+    const Class& float32x4_class = compiler->float32x4_class();
+    const Code& stub =
+        Code::Handle(StubCode::GetAllocationStubForClass(float32x4_class));
+    const ExternalLabel label(float32x4_class.ToCString(), stub.EntryPoint());
+
+    LocationSummary* locs = instruction_->locs();
+    locs->live_registers()->Remove(locs->out());
+
+    compiler->SaveLiveRegisters(locs);
+    compiler->GenerateCall(Scanner::kNoSourcePos,  // No token position.
+                           &label,
+                           PcDescriptors::kOther,
+                           locs);
+    __ MoveRegister(locs->out().reg(), EAX);
+    compiler->RestoreLiveRegisters(locs);
+
+    __ jmp(exit_label());
+  }
+
+ private:
+  Instruction* instruction_;
+};
+
+
 LocationSummary* LoadFieldInstr::MakeLocationSummary(bool opt) const {
   const intptr_t kNumInputs = 1;
   const intptr_t kNumTemps = 0;
   LocationSummary* locs =
       new LocationSummary(
           kNumInputs, kNumTemps,
           (opt && !IsPotentialUnboxedLoad())
           ? LocationSummary::kNoCall
           : LocationSummary::kCallOnSlowPath);
 
(...skipping 13 matching lines...)
 
 void LoadFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
   Register instance_reg = locs()->in(0).reg();
   if (IsUnboxedLoad() && compiler->is_optimizing()) {
     XmmRegister result = locs()->out().fpu_reg();
     Register temp = locs()->temp(0).reg();
     __ movl(temp, FieldAddress(instance_reg, offset_in_bytes()));
     const intptr_t cid = field()->UnboxedFieldCid();
     switch (cid) {
       case kDoubleCid:
+        __ Comment("UnboxedDoubleLoadFieldInstr");
         __ movsd(result, FieldAddress(temp, Double::value_offset()));
         break;
-      // TODO(johnmccutchan): Add Float32x4 path here.
+      case kFloat32x4Cid:
+        __ Comment("UnboxedFloat32x4LoadFieldInstr");
+        __ movups(result, FieldAddress(temp, Float32x4::value_offset()));
+        break;
       default:
         UNREACHABLE();
     }
     return;
   }
 
   Label done;
   Register result = locs()->out().reg();
   if (IsPotentialUnboxedLoad()) {
     Register temp = locs()->temp(1).reg();
     XmmRegister value = locs()->temp(0).fpu_reg();
 
+
     Label load_pointer;
     Label load_double;
+    Label load_float32x4;
+
     __ LoadObject(result, Field::ZoneHandle(field()->raw()));
 
     FieldAddress field_cid_operand(result, Field::guarded_cid_offset());
     FieldAddress field_nullability_operand(result, Field::is_nullable_offset());
 
     __ cmpl(field_nullability_operand, Immediate(kNullCid));
     __ j(EQUAL, &load_pointer);
 
     __ cmpl(field_cid_operand, Immediate(kDoubleCid));
     __ j(EQUAL, &load_double);
 
+    __ cmpl(field_cid_operand, Immediate(kFloat32x4Cid));
+    __ j(EQUAL, &load_float32x4);
+
     // Fall through.
     __ jmp(&load_pointer);
 
-    __ Bind(&load_double);
-    BoxDoubleSlowPath* slow_path = new BoxDoubleSlowPath(this);
-    compiler->AddSlowPathCode(slow_path);
-
     if (!compiler->is_optimizing()) {
       locs()->live_registers()->Add(locs()->in(0));
     }
 
-    __ TryAllocate(compiler->double_class(),
-                   slow_path->entry_label(),
-                   Assembler::kFarJump,
-                   result,
-                   temp);
-    __ Bind(slow_path->exit_label());
-    __ movl(temp, FieldAddress(instance_reg, offset_in_bytes()));
-    __ movsd(value, FieldAddress(temp, Double::value_offset()));
-    __ movsd(FieldAddress(result, Double::value_offset()), value);
-    __ jmp(&done);
-
-    // TODO(johnmccutchan): Add Float32x4 path here.
+    {
+      __ Bind(&load_double);
+      BoxDoubleSlowPath* slow_path = new BoxDoubleSlowPath(this);
+      compiler->AddSlowPathCode(slow_path);
+
+      __ TryAllocate(compiler->double_class(),
+                     slow_path->entry_label(),
+                     Assembler::kFarJump,
+                     result,
+                     temp);
+      __ Bind(slow_path->exit_label());
+      __ movl(temp, FieldAddress(instance_reg, offset_in_bytes()));
+      __ movsd(value, FieldAddress(temp, Double::value_offset()));
+      __ movsd(FieldAddress(result, Double::value_offset()), value);
+      __ jmp(&done);
+    }
+
+    {
+      __ Bind(&load_float32x4);
+
+      BoxFloat32x4SlowPath* slow_path = new BoxFloat32x4SlowPath(this);
+      compiler->AddSlowPathCode(slow_path);
+
+      __ TryAllocate(compiler->float32x4_class(),
+                     slow_path->entry_label(),
+                     Assembler::kFarJump,
+                     result,
+                     temp);
+      __ Bind(slow_path->exit_label());
+      __ movl(temp, FieldAddress(instance_reg, offset_in_bytes()));
+      __ movups(value, FieldAddress(temp, Float32x4::value_offset()));
+      __ movups(FieldAddress(result, Float32x4::value_offset()), value);
+      __ jmp(&done);
+    }
 
     __ Bind(&load_pointer);
   }
   __ movl(result, FieldAddress(instance_reg, offset_in_bytes()));
   __ Bind(&done);
 }
 
 
 LocationSummary* InstantiateTypeInstr::MakeLocationSummary(bool opt) const {
   const intptr_t kNumInputs = 1;
(...skipping 835 matching lines...)
   } else {
     Register temp = locs()->temp(0).reg();
     __ movl(temp, left);
     __ orl(temp, right);
     __ testl(temp, Immediate(kSmiTagMask));
   }
   __ j(ZERO, deopt);
 }
 
 
+
+
+
 LocationSummary* BoxDoubleInstr::MakeLocationSummary(bool opt) const {
   const intptr_t kNumInputs = 1;
   const intptr_t kNumTemps = 0;
   LocationSummary* summary =
       new LocationSummary(kNumInputs,
                           kNumTemps,
                           LocationSummary::kCallOnSlowPath);
   summary->set_in(0, Location::RequiresFpuRegister());
   summary->set_out(Location::RequiresRegister());
   return summary;
(...skipping 69 matching lines...)
   LocationSummary* summary =
       new LocationSummary(kNumInputs,
                           kNumTemps,
                           LocationSummary::kCallOnSlowPath);
   summary->set_in(0, Location::RequiresFpuRegister());
   summary->set_out(Location::RequiresRegister());
   return summary;
 }
 
 
-class BoxFloat32x4SlowPath : public SlowPathCode {
- public:
-  explicit BoxFloat32x4SlowPath(BoxFloat32x4Instr* instruction)
-      : instruction_(instruction) { }
-
-  virtual void EmitNativeCode(FlowGraphCompiler* compiler) {
-    __ Comment("BoxFloat32x4SlowPath");
-    __ Bind(entry_label());
-    const Class& float32x4_class = compiler->float32x4_class();
-    const Code& stub =
-        Code::Handle(StubCode::GetAllocationStubForClass(float32x4_class));
-    const ExternalLabel label(float32x4_class.ToCString(), stub.EntryPoint());
-
-    LocationSummary* locs = instruction_->locs();
-    locs->live_registers()->Remove(locs->out());
-
-    compiler->SaveLiveRegisters(locs);
-    compiler->GenerateCall(Scanner::kNoSourcePos,  // No token position.
-                           &label,
-                           PcDescriptors::kOther,
-                           locs);
-    __ MoveRegister(locs->out().reg(), EAX);
-    compiler->RestoreLiveRegisters(locs);
-
-    __ jmp(exit_label());
-  }
-
- private:
-  BoxFloat32x4Instr* instruction_;
-};
-
-
 void BoxFloat32x4Instr::EmitNativeCode(FlowGraphCompiler* compiler) {
   BoxFloat32x4SlowPath* slow_path = new BoxFloat32x4SlowPath(this);
   compiler->AddSlowPathCode(slow_path);
 
   Register out_reg = locs()->out().reg();
   XmmRegister value = locs()->in(0).fpu_reg();
 
   __ TryAllocate(compiler->float32x4_class(),
                  slow_path->entry_label(),
                  Assembler::kFarJump,
(...skipping 2270 matching lines...)
                          PcDescriptors::kOther,
                          locs());
   __ Drop(2);  // Discard type arguments and receiver.
 }
 
 }  // namespace dart
 
 #undef __
 
 #endif  // defined TARGET_ARCH_IA32
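
A closing note on the load side: a load of a potentially unboxed Float32x4 field cannot reuse anything; it must materialize a fresh box for its result. The emitted code tries TryAllocate inline and falls back to BoxFloat32x4SlowPath, which saves live registers, calls the allocation stub, moves the result out of EAX, and restores registers. Below is a minimal runnable C++ analogy of that fast-path/slow-path split, with BumpAllocator standing in for new-space bump allocation and plain new for the stub call; all names here are illustrative, not VM API.

#include <cstddef>
#include <cstdio>

struct Float32x4Box { float v[4]; };

struct BumpAllocator {
  char* top;
  char* end;
  void* TryAllocate(std::size_t size) {    // fast path: bump a pointer
    if (top + size > end) return nullptr;  // would overflow: take slow path
    void* result = top;
    top += size;
    return result;
  }
};

Float32x4Box* LoadBoxedFloat32x4(const float unboxed[4], BumpAllocator* heap) {
  void* raw = heap->TryAllocate(sizeof(Float32x4Box));
  Float32x4Box* box = (raw != nullptr)
      ? static_cast<Float32x4Box*>(raw)    // inline allocation succeeded
      : new Float32x4Box();                // BoxFloat32x4SlowPath: stub call
  for (int i = 0; i < 4; ++i) {            // the movups pair in the real code
    box->v[i] = unboxed[i];
  }
  return box;
}

int main() {
  char buffer[sizeof(Float32x4Box)];  // room for exactly one fast-path box
  BumpAllocator heap = {buffer, buffer + sizeof(buffer)};
  const float x[4] = {1, 2, 3, 4};
  Float32x4Box* fast = LoadBoxedFloat32x4(x, &heap);  // uses the bump space
  Float32x4Box* slow = LoadBoxedFloat32x4(x, &heap);  // falls back to new
  std::printf("fast=%p slow=%p\n", static_cast<void*>(fast),
              static_cast<void*>(slow));
  delete slow;  // only the slow-path box came from the C++ heap
}

This is also why the CL moves BoxFloat32x4SlowPath above LoadFieldInstr and retypes its constructor argument from BoxFloat32x4Instr* to Instruction*: the same slow path now serves both the load and the explicit box instruction.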