Chromium Code Reviews

Unified Diff: runtime/vm/intermediate_language_arm.cc

Issue 150063004: Support reusable boxes for Float32x4 fields (Closed) Base URL: https://dart.googlecode.com/svn/branches/bleeding_edge/dart
Patch Set: Created 6 years, 10 months ago
 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
 // for details. All rights reserved. Use of this source code is governed by a
 // BSD-style license that can be found in the LICENSE file.

 #include "vm/globals.h"  // Needed here to get TARGET_ARCH_ARM.
 #if defined(TARGET_ARCH_ARM)

 #include "vm/intermediate_language.h"

 #include "vm/dart_entry.h"
(...skipping 1565 matching lines...)
         UNREACHABLE();
       }
     }
   }
   __ Bind(&ok);
 }


 class StoreInstanceFieldSlowPath : public SlowPathCode {
  public:
-  StoreInstanceFieldSlowPath(StoreInstanceFieldInstr* instruction,
-                             const Class& cls)
-      : instruction_(instruction), cls_(cls) { }
+  explicit StoreInstanceFieldSlowPath(StoreInstanceFieldInstr* instruction)
+      : instruction_(instruction) { }

   virtual void EmitNativeCode(FlowGraphCompiler* compiler) {
     __ Comment("StoreInstanceFieldSlowPath");
-    __ Bind(entry_label());
-    const Code& stub =
-        Code::Handle(StubCode::GetAllocationStubForClass(cls_));
-    const ExternalLabel label(cls_.ToCString(), stub.EntryPoint());
-
-    LocationSummary* locs = instruction_->locs();
-    locs->live_registers()->Remove(locs->out());
-
-    compiler->SaveLiveRegisters(locs);
-    compiler->GenerateCall(Scanner::kNoSourcePos,  // No token position.
-                           &label,
-                           PcDescriptors::kOther,
-                           locs);
-    __ MoveRegister(locs->temp(0).reg(), R0);
-    compiler->RestoreLiveRegisters(locs);
-
-    __ b(exit_label());
+    {
+      __ Bind(double_entry_label());
+
+      const Class& cls = compiler->double_class();
+      const Code& stub =
+          Code::Handle(StubCode::GetAllocationStubForClass(cls));
+      const ExternalLabel label(cls.ToCString(), stub.EntryPoint());
+
+      LocationSummary* locs = instruction_->locs();
+      locs->live_registers()->Remove(locs->out());
+
+      compiler->SaveLiveRegisters(locs);
+      compiler->GenerateCall(Scanner::kNoSourcePos,  // No token position.
+                             &label,
+                             PcDescriptors::kOther,
+                             locs);
+      __ MoveRegister(locs->temp(0).reg(), R0);
+      compiler->RestoreLiveRegisters(locs);
+
+      __ b(double_exit_label());
+    }
+
+    {
+      __ Bind(float32x4_entry_label());
+
+      const Class& cls = compiler->float32x4_class();
+      const Code& stub =
+          Code::Handle(StubCode::GetAllocationStubForClass(cls));
+      const ExternalLabel label(cls.ToCString(), stub.EntryPoint());
+
+      LocationSummary* locs = instruction_->locs();
+      locs->live_registers()->Remove(locs->out());
+
+      compiler->SaveLiveRegisters(locs);
+      compiler->GenerateCall(Scanner::kNoSourcePos,  // No token position.
+                             &label,
+                             PcDescriptors::kOther,
+                             locs);
+      __ MoveRegister(locs->temp(0).reg(), R0);
+      compiler->RestoreLiveRegisters(locs);
+
+      __ b(float32x4_exit_label());
+    }
   }

+  Label* double_entry_label() {
+    // Use default SlowPathCode label for double.
+    return entry_label();
+  }
+  Label* double_exit_label() {
+    // Use default SlowPathCode label for double.
+    return exit_label();
+  }
+
+  Label* float32x4_entry_label() { return &float32x4_entry_label_; }
+  Label* float32x4_exit_label() { return &float32x4_exit_label_; }
+
  private:
+  Label float32x4_entry_label_;
+  Label float32x4_exit_label_;
   StoreInstanceFieldInstr* instruction_;
-  const Class& cls_;
 };


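Note: the rewritten slow path serves two allocation kinds from a single SlowPathCode object by giving each kind its own entry/exit label pair; the double pair simply aliases the base-class labels. A minimal, self-contained sketch of that pattern, where Assembler and Label are simplified stand-ins for the VM's real types:

#include <cstdio>

struct Label { const char* name; };

struct Assembler {
  void Bind(Label* l)   { std::printf("bind   %s\n", l->name); }
  void Branch(Label* l) { std::printf("branch %s\n", l->name); }
};

// Base class with one default entry/exit pair, like SlowPathCode.
class SlowPath {
 public:
  Label* entry_label() { return &entry_; }
  Label* exit_label()  { return &exit_; }
 private:
  Label entry_{"entry"}, exit_{"exit"};
};

// One slow path, two allocation kinds: the double labels alias the
// base-class pair, the Float32x4 labels are extra members.
class TwoKindSlowPath : public SlowPath {
 public:
  Label* double_entry_label()    { return entry_label(); }
  Label* double_exit_label()     { return exit_label(); }
  Label* float32x4_entry_label() { return &f32x4_entry_; }
  Label* float32x4_exit_label()  { return &f32x4_exit_; }

  void EmitNativeCode(Assembler* masm) {
    masm->Bind(double_entry_label());
    // ... call the Double allocation stub here ...
    masm->Branch(double_exit_label());

    masm->Bind(float32x4_entry_label());
    // ... call the Float32x4 allocation stub here ...
    masm->Branch(float32x4_exit_label());
  }

 private:
  Label f32x4_entry_{"f32x4_entry"}, f32x4_exit_{"f32x4_exit"};
};

int main() {
  Assembler masm;
  TwoKindSlowPath path;
  path.EmitNativeCode(&masm);  // emits both out-of-line stub-call sequences
}

Sharing one slow-path object keeps both out-of-line allocation sequences in a single cold region instead of emitting a separate slow path per boxed class.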
 LocationSummary* StoreInstanceFieldInstr::MakeLocationSummary(bool opt) const {
   const intptr_t kNumInputs = 2;
   const intptr_t kNumTemps = 0;
   LocationSummary* summary =
       new LocationSummary(kNumInputs, kNumTemps,
           (field().guarded_cid() == kIllegalCid) || (is_initialization_)
           ? LocationSummary::kCallOnSlowPath
(...skipping 21 matching lines...)
 }


 void StoreInstanceFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
   Label skip_store;

   Register instance_reg = locs()->in(0).reg();

   if (IsUnboxedStore() && compiler->is_optimizing()) {
     DRegister value = EvenDRegisterOf(locs()->in(1).fpu_reg());
+    DRegister value_odd = OddDRegisterOf(locs()->in(1).fpu_reg());
     Register temp = locs()->temp(0).reg();
     Register temp2 = locs()->temp(1).reg();
     const intptr_t cid = field().UnboxedFieldCid();

     if (is_initialization_) {
+      StoreInstanceFieldSlowPath* slow_path =
+          new StoreInstanceFieldSlowPath(this);
+      compiler->AddSlowPathCode(slow_path);
+
       const Class* cls = NULL;
+      Label* entry_label = NULL;
+      Label* exit_label = NULL;
       switch (cid) {
         case kDoubleCid:
           cls = &compiler->double_class();
+          entry_label = slow_path->double_entry_label();
+          exit_label = slow_path->double_exit_label();
           break;
-        // TODO(johnmccutchan): Add kFloat32x4Cid here.
+        case kFloat32x4Cid:
+          cls = &compiler->float32x4_class();
+          entry_label = slow_path->float32x4_entry_label();
+          exit_label = slow_path->float32x4_exit_label();
+          break;
         default:
           UNREACHABLE();
       }
-      StoreInstanceFieldSlowPath* slow_path =
-          new StoreInstanceFieldSlowPath(this, *cls);
-      compiler->AddSlowPathCode(slow_path);
+
       __ TryAllocate(*cls,
-                     slow_path->entry_label(),
+                     entry_label,
                      temp,
                      temp2);
-      __ Bind(slow_path->exit_label());
+      __ Bind(exit_label);
       __ MoveRegister(temp2, temp);
       __ StoreIntoObject(instance_reg,
                          FieldAddress(instance_reg, field().Offset()),
                          temp2);
     } else {
       __ ldr(temp, FieldAddress(instance_reg, field().Offset()));
     }
     switch (cid) {
       case kDoubleCid:
+        __ Comment("UnboxedDoubleStoreInstanceFieldInstr");
         __ StoreDToOffset(value, temp, Double::value_offset() - kHeapObjectTag);
-        // TODO(johnmccutchan): Add kFloat32x4Cid here.
         break;
+      case kFloat32x4Cid:
+        __ Comment("UnboxedFloat32x4StoreInstanceFieldInstr");
+        __ StoreDToOffset(value, temp,
+                          Float32x4::value_offset() - kHeapObjectTag);
+        __ StoreDToOffset(value_odd, temp,
+                          Float32x4::value_offset() + 2*kWordSize - kHeapObjectTag);
+        break;
       default:
         UNREACHABLE();
     }

     return;
   }

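Note: the paired StoreDToOffset calls move the 16-byte Float32x4 payload as two 8-byte halves, the even D register at value_offset() and the odd D register at value_offset() + 2*kWordSize. A small sketch of that offset arithmetic, with assumed ARM32 constants (kWordSize = 4; value_offset here is a hypothetical stand-in for Float32x4::value_offset()):

#include <cassert>
#include <cstdint>

int main() {
  const intptr_t kWordSize = 4;       // 32-bit ARM word
  const intptr_t kHeapObjectTag = 1;  // heap pointers carry a tag bias
  const intptr_t value_offset = 8;    // hypothetical payload offset

  // One D register transfers 8 bytes, so the 16-byte value needs two
  // transfers; the second starts 2*kWordSize == 8 bytes further on.
  const intptr_t low_half  = value_offset - kHeapObjectTag;
  const intptr_t high_half = value_offset + 2 * kWordSize - kHeapObjectTag;
  assert(high_half - low_half == 8);
  return 0;
}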
   if (IsPotentialUnboxedStore()) {
     Register value_reg = locs()->in(1).reg();
     Register temp = locs()->temp(0).reg();
     Register temp2 = locs()->temp(1).reg();
     DRegister fpu_temp = EvenDRegisterOf(locs()->temp(2).fpu_reg());
+    DRegister fpu_temp_odd = OddDRegisterOf(locs()->temp(2).fpu_reg());

     Label store_pointer;
-    Label copy_double;
     Label store_double;
+    Label store_float32x4;

     __ LoadObject(temp, Field::ZoneHandle(field().raw()));

     __ ldr(temp2, FieldAddress(temp, Field::is_nullable_offset()));
     __ CompareImmediate(temp2, kNullCid);
     __ b(&store_pointer, EQ);

     __ ldrb(temp2, FieldAddress(temp, Field::kind_bits_offset()));
     __ tst(temp2, ShifterOperand(1 << Field::kUnboxingCandidateBit));
     __ b(&store_pointer, EQ);

     __ ldr(temp2, FieldAddress(temp, Field::guarded_cid_offset()));
     __ CompareImmediate(temp2, kDoubleCid);
     __ b(&store_double, EQ);

+    __ ldr(temp2, FieldAddress(temp, Field::guarded_cid_offset()));
+    __ CompareImmediate(temp2, kFloat32x4Cid);
+    __ b(&store_float32x4, EQ);
+
     // Fall through.
     __ b(&store_pointer);

-    __ Bind(&store_double);
-
-    __ ldr(temp, FieldAddress(instance_reg, field().Offset()));
-    __ CompareImmediate(temp,
-                        reinterpret_cast<intptr_t>(Object::null()));
-    __ b(&copy_double, NE);
-
     StoreInstanceFieldSlowPath* slow_path =
-        new StoreInstanceFieldSlowPath(this, compiler->double_class());
+        new StoreInstanceFieldSlowPath(this);
     compiler->AddSlowPathCode(slow_path);

-    if (!compiler->is_optimizing()) {
-      locs()->live_registers()->Add(locs()->in(0));
-      locs()->live_registers()->Add(locs()->in(1));
-    }
-
-    __ TryAllocate(compiler->double_class(),
-                   slow_path->entry_label(),
-                   temp,
-                   temp2);
-    __ Bind(slow_path->exit_label());
-    __ MoveRegister(temp2, temp);
-    __ StoreIntoObject(instance_reg,
-                       FieldAddress(instance_reg, field().Offset()),
-                       temp2);
-    __ Bind(&copy_double);
-    __ LoadDFromOffset(fpu_temp,
-                       value_reg,
-                       Double::value_offset() - kHeapObjectTag);
-    __ StoreDToOffset(fpu_temp, temp, Double::value_offset() - kHeapObjectTag);
-    __ b(&skip_store);
+    {
+      __ Bind(&store_double);
+      Label copy_double;
+
+      __ ldr(temp, FieldAddress(instance_reg, field().Offset()));
+      __ CompareImmediate(temp,
+                          reinterpret_cast<intptr_t>(Object::null()));
+      __ b(&copy_double, NE);
+
+      if (!compiler->is_optimizing()) {
+        locs()->live_registers()->Add(locs()->in(0));
+        locs()->live_registers()->Add(locs()->in(1));
+      }
+
+      __ TryAllocate(compiler->double_class(),
+                     slow_path->double_entry_label(),
+                     temp,
+                     temp2);
+      __ Bind(slow_path->double_exit_label());
+      __ MoveRegister(temp2, temp);
+      __ StoreIntoObject(instance_reg,
+                         FieldAddress(instance_reg, field().Offset()),
+                         temp2);
+      __ Bind(&copy_double);
+      __ LoadDFromOffset(fpu_temp,
+                         value_reg,
+                         Double::value_offset() - kHeapObjectTag);
+      __ StoreDToOffset(fpu_temp,
+                        temp,
+                        Double::value_offset() - kHeapObjectTag);
+      __ b(&skip_store);
+    }
+
+    {
+      __ Bind(&store_float32x4);
+      Label copy_float32x4;
+
+      __ ldr(temp, FieldAddress(instance_reg, field().Offset()));
+      __ CompareImmediate(temp,
+                          reinterpret_cast<intptr_t>(Object::null()));
+      __ b(&copy_float32x4, NE);
+
+      if (!compiler->is_optimizing()) {
+        locs()->live_registers()->Add(locs()->in(0));
+        locs()->live_registers()->Add(locs()->in(1));
+      }
+
+      __ TryAllocate(compiler->float32x4_class(),
+                     slow_path->float32x4_entry_label(),
+                     temp,
+                     temp2);
+      __ Bind(slow_path->float32x4_exit_label());
+      __ MoveRegister(temp2, temp);
+      __ StoreIntoObject(instance_reg,
+                         FieldAddress(instance_reg, field().Offset()),
+                         temp2);
+      __ Bind(&copy_float32x4);
+      __ LoadDFromOffset(fpu_temp, value_reg,
+                         Float32x4::value_offset() - kHeapObjectTag);
+      __ LoadDFromOffset(fpu_temp_odd, value_reg,
+                         Float32x4::value_offset() + 2*kWordSize - kHeapObjectTag);
+      __ StoreDToOffset(fpu_temp, temp,
+                        Float32x4::value_offset() - kHeapObjectTag);
+      __ StoreDToOffset(fpu_temp_odd, temp,
+                        Float32x4::value_offset() + 2*kWordSize - kHeapObjectTag);
+      __ b(&skip_store);
+    }
+
     __ Bind(&store_pointer);
   }

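Note: at run time the emitted code picks a store strategy from the field's guard state: nullable or non-candidate fields take the pointer path, and only a stable guarded cid of kDoubleCid or kFloat32x4Cid takes an unboxed path. A sketch of that decision in plain C++ (the enum and struct are illustrative stand-ins for the VM's Field metadata, which encodes nullability as a cid rather than a bool):

#include <cstdio>

enum Cid { kNullCid, kDoubleCid, kFloat32x4Cid, kOtherCid };

struct FieldGuard {
  Cid guarded_cid;
  bool is_nullable;
  bool is_unboxing_candidate;
};

const char* StoreStrategy(const FieldGuard& field) {
  if (field.is_nullable) return "store_pointer";
  if (!field.is_unboxing_candidate) return "store_pointer";
  if (field.guarded_cid == kDoubleCid) return "store_double";
  if (field.guarded_cid == kFloat32x4Cid) return "store_float32x4";
  return "store_pointer";  // fall through, as in the emitted code
}

int main() {
  FieldGuard f = {kFloat32x4Cid, false, true};
  std::printf("%s\n", StoreStrategy(f));  // prints: store_float32x4
}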
   if (ShouldEmitStoreBarrier()) {
     Register value_reg = locs()->in(1).reg();
     __ StoreIntoObject(instance_reg,
                        FieldAddress(instance_reg, field().Offset()),
                        value_reg,
                        CanValueBeSmi());
   } else {
(...skipping 123 matching lines...)
                          deopt_id(),
                          kAllocateObjectWithBoundsCheckRuntimeEntry,
                          3,
                          locs());
   __ Drop(3);
   ASSERT(locs()->out().reg() == R0);
   __ Pop(R0);  // Pop new instance.
 }


-class BoxDoubleSlowPath : public SlowPathCode {
+class LoadFieldSlowPath : public SlowPathCode {
  public:
-  explicit BoxDoubleSlowPath(Instruction* instruction)
+  explicit LoadFieldSlowPath(Instruction* instruction)
       : instruction_(instruction) { }

   virtual void EmitNativeCode(FlowGraphCompiler* compiler) {
-    __ Comment("BoxDoubleSlowPath");
-    __ Bind(entry_label());
-    const Class& double_class = compiler->double_class();
-    const Code& stub =
-        Code::Handle(StubCode::GetAllocationStubForClass(double_class));
-    const ExternalLabel label(double_class.ToCString(), stub.EntryPoint());
-
-    LocationSummary* locs = instruction_->locs();
-    locs->live_registers()->Remove(locs->out());
-
-    compiler->SaveLiveRegisters(locs);
-    compiler->GenerateCall(Scanner::kNoSourcePos,  // No token position.
-                           &label,
-                           PcDescriptors::kOther,
-                           locs);
-    __ MoveRegister(locs->out().reg(), R0);
-    compiler->RestoreLiveRegisters(locs);
-
-    __ b(exit_label());
+    __ Comment("LoadFieldSlowPath");
+    {
+      __ Bind(double_entry_label());
+      const Class& double_class = compiler->double_class();
+      const Code& stub =
+          Code::Handle(StubCode::GetAllocationStubForClass(double_class));
+      const ExternalLabel label(double_class.ToCString(), stub.EntryPoint());
+
+      LocationSummary* locs = instruction_->locs();
+      locs->live_registers()->Remove(locs->out());
+
+      compiler->SaveLiveRegisters(locs);
+      compiler->GenerateCall(Scanner::kNoSourcePos,  // No token position.
+                             &label,
+                             PcDescriptors::kOther,
+                             locs);
+      __ MoveRegister(locs->out().reg(), R0);
+      compiler->RestoreLiveRegisters(locs);
+
+      __ b(double_exit_label());
+    }
+    {
+      __ Bind(float32x4_entry_label());
+      const Class& float32x4_class = compiler->float32x4_class();
+      const Code& stub =
+          Code::Handle(StubCode::GetAllocationStubForClass(float32x4_class));
+      const ExternalLabel label(float32x4_class.ToCString(), stub.EntryPoint());
+
+      LocationSummary* locs = instruction_->locs();
+      locs->live_registers()->Remove(locs->out());
+
+      compiler->SaveLiveRegisters(locs);
+      compiler->GenerateCall(Scanner::kNoSourcePos,  // No token position.
+                             &label,
+                             PcDescriptors::kOther,
+                             locs);
+      __ MoveRegister(locs->out().reg(), R0);
+      compiler->RestoreLiveRegisters(locs);
+
+      __ b(float32x4_exit_label());
+    }
   }

+  Label* double_entry_label() {
+    // Use default SlowPathCode label for double.
+    return entry_label();
+  }
+  Label* double_exit_label() {
+    // Use default SlowPathCode label for double.
+    return exit_label();
+  }
+
+  Label* float32x4_entry_label() { return &float32x4_entry_label_; }
+  Label* float32x4_exit_label() { return &float32x4_exit_label_; }
+
  private:
+  Label float32x4_entry_label_;
+  Label float32x4_exit_label_;
   Instruction* instruction_;
 };


 LocationSummary* LoadFieldInstr::MakeLocationSummary(bool opt) const {
   const intptr_t kNumInputs = 1;
   const intptr_t kNumTemps = 0;
   LocationSummary* locs =
       new LocationSummary(
           kNumInputs, kNumTemps,
(...skipping 12 matching lines...)
   }
   locs->set_out(Location::RequiresRegister());
   return locs;
 }


 void LoadFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
   Register instance_reg = locs()->in(0).reg();
   if (IsUnboxedLoad() && compiler->is_optimizing()) {
     DRegister result = EvenDRegisterOf(locs()->out().fpu_reg());
+    DRegister result_odd = OddDRegisterOf(locs()->out().fpu_reg());
     Register temp = locs()->temp(0).reg();
     __ ldr(temp, FieldAddress(instance_reg, offset_in_bytes()));
     intptr_t cid = field()->UnboxedFieldCid();
     switch (cid) {
       case kDoubleCid:
+        __ Comment("UnboxedDoubleLoadFieldInstr");
         __ LoadDFromOffset(result, temp,
                            Double::value_offset() - kHeapObjectTag);
         break;
-      // TODO(johnmccutchan): Add Float32x4 path here.
+      case kFloat32x4Cid:
+        __ Comment("UnboxedFloat32x4LoadFieldInstr");
+        __ LoadDFromOffset(result, temp,
+                           Float32x4::value_offset() - kHeapObjectTag);
+        __ LoadDFromOffset(result_odd, temp,
+                           Float32x4::value_offset() + 2*kWordSize - kHeapObjectTag);
+        break;
       default:
         UNREACHABLE();
     }
     return;
   }

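Note: the even/odd helpers rely on how ARM VFP/NEON registers alias: quad register Qn overlays the D-register pair {D(2n), D(2n+1)}, so a Float32x4 held in a Q register spills as two D-register moves. A sketch of that mapping (the enums are illustrative, not the VM's declarations):

#include <cstdio>

enum QRegister { Q0, Q1, Q2, Q3 };
enum DRegister { D0, D1, D2, D3, D4, D5, D6, D7 };

// Qn aliases the D-register pair {D(2n), D(2n+1)}.
DRegister EvenDRegisterOf(QRegister q) {
  return static_cast<DRegister>(2 * q);
}
DRegister OddDRegisterOf(QRegister q) {
  return static_cast<DRegister>(2 * q + 1);
}

int main() {
  // A 16-byte Float32x4 in Q1 moves as two 8-byte halves: D2, then D3.
  std::printf("Q1 -> D%d and D%d\n",
              EvenDRegisterOf(Q1), OddDRegisterOf(Q1));
}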
   Label done;
   Register result_reg = locs()->out().reg();
   if (IsPotentialUnboxedLoad()) {
     Register temp = locs()->temp(1).reg();
     DRegister value = EvenDRegisterOf(locs()->temp(0).fpu_reg());
+    DRegister value_odd = OddDRegisterOf(locs()->temp(0).fpu_reg());
+
+    LoadFieldSlowPath* slow_path = new LoadFieldSlowPath(this);
+    compiler->AddSlowPathCode(slow_path);

     Label load_pointer;
     Label load_double;
+    Label load_float32x4;

     __ LoadObject(result_reg, Field::ZoneHandle(field()->raw()));

     FieldAddress field_cid_operand(result_reg, Field::guarded_cid_offset());
     FieldAddress field_nullability_operand(result_reg,
                                            Field::is_nullable_offset());

     __ ldr(temp, field_nullability_operand);
     __ CompareImmediate(temp, kNullCid);
     __ b(&load_pointer, EQ);

     __ ldr(temp, field_cid_operand);
     __ CompareImmediate(temp, kDoubleCid);
     __ b(&load_double, EQ);

+    __ ldr(temp, field_cid_operand);
+    __ CompareImmediate(temp, kFloat32x4Cid);
+    __ b(&load_float32x4, EQ);
+
     // Fall through.
     __ b(&load_pointer);

-    __ Bind(&load_double);
-
-    BoxDoubleSlowPath* slow_path = new BoxDoubleSlowPath(this);
-    compiler->AddSlowPathCode(slow_path);
-
-    if (!compiler->is_optimizing()) {
-      locs()->live_registers()->Add(locs()->in(0));
-    }
-
-    __ TryAllocate(compiler->double_class(),
-                   slow_path->entry_label(),
-                   result_reg,
-                   temp);
-    __ Bind(slow_path->exit_label());
-    __ ldr(temp, FieldAddress(instance_reg, offset_in_bytes()));
-    __ LoadDFromOffset(value, temp, Double::value_offset() - kHeapObjectTag);
-    __ StoreDToOffset(value,
-                      result_reg,
-                      Double::value_offset() - kHeapObjectTag);
-    __ b(&done);
-
-    // TODO(johnmccutchan): Add Float32x4 path here.
+    {
+      __ Bind(&load_double);
+
+      if (!compiler->is_optimizing()) {
+        locs()->live_registers()->Add(locs()->in(0));
+      }
+
+      __ TryAllocate(compiler->double_class(),
+                     slow_path->double_entry_label(),
+                     result_reg,
+                     temp);
+      __ Bind(slow_path->double_exit_label());
+      __ ldr(temp, FieldAddress(instance_reg, offset_in_bytes()));
+      __ LoadDFromOffset(value, temp, Double::value_offset() - kHeapObjectTag);
+      __ StoreDToOffset(value,
+                        result_reg,
+                        Double::value_offset() - kHeapObjectTag);
+      __ b(&done);
+    }
+
+    {
+      __ Bind(&load_float32x4);
+
+      if (!compiler->is_optimizing()) {
+        locs()->live_registers()->Add(locs()->in(0));
+      }
+
+      __ TryAllocate(compiler->float32x4_class(),
+                     slow_path->float32x4_entry_label(),
+                     result_reg,
+                     temp);
+      __ Bind(slow_path->float32x4_exit_label());
+      __ ldr(temp, FieldAddress(instance_reg, offset_in_bytes()));
+      __ LoadDFromOffset(value, temp,
+                         Float32x4::value_offset() - kHeapObjectTag);
+      __ LoadDFromOffset(value_odd, temp,
+                         Float32x4::value_offset() + 2*kWordSize - kHeapObjectTag);
+      __ StoreDToOffset(value, result_reg,
+                        Float32x4::value_offset() - kHeapObjectTag);
+      __ StoreDToOffset(value_odd, result_reg,
+                        Float32x4::value_offset() + 2*kWordSize - kHeapObjectTag);
+      __ b(&done);
+    }

     __ Bind(&load_pointer);
   }
   __ LoadFromOffset(kWord, result_reg,
                     instance_reg, offset_in_bytes() - kHeapObjectTag);
   __ Bind(&done);
 }


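Note: both load paths lean on the TryAllocate fast path: an inline bump allocation that branches to the slow path's entry label on failure, with the allocation stub rejoining at the matching exit label. A self-contained sketch of that contract (the Heap type is a stand-in, not the VM's allocator):

#include <cstdint>
#include <cstdio>

struct Heap {
  uintptr_t top;
  uintptr_t end;

  // Inline fast path: bump-allocate 'size' bytes. On failure the real
  // emitted code branches to slow_path->..._entry_label(), where the
  // allocation stub runs and then branches back to the exit label.
  bool TryAllocate(uintptr_t size, uintptr_t* result) {
    if (end - top < size) return false;  // take the slow path
    *result = top;
    top += size;
    return true;
  }
};

int main() {
  alignas(8) static uint8_t arena[32];
  Heap heap = {reinterpret_cast<uintptr_t>(arena),
               reinterpret_cast<uintptr_t>(arena) + sizeof(arena)};
  uintptr_t box = 0;
  for (int i = 0; i < 3; ++i) {  // the third 16-byte box misses
    bool ok = heap.TryAllocate(16, &box);
    std::printf("alloc %d: %s\n", i, ok ? "fast path" : "slow path");
  }
}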
 LocationSummary* InstantiateTypeInstr::MakeLocationSummary(bool opt) const {
(...skipping 787 matching lines...)
   } else if (right_cid == kSmiCid) {
     __ tst(left, ShifterOperand(kSmiTagMask));
   } else {
     __ orr(IP, left, ShifterOperand(right));
     __ tst(IP, ShifterOperand(kSmiTagMask));
   }
   __ b(deopt, EQ);
 }


+class BoxDoubleSlowPath : public SlowPathCode {
+ public:
+  explicit BoxDoubleSlowPath(BoxDoubleInstr* instruction)
+      : instruction_(instruction) { }
+
+  virtual void EmitNativeCode(FlowGraphCompiler* compiler) {
+    __ Comment("BoxDoubleSlowPath");
+    __ Bind(entry_label());
+    const Class& double_class = compiler->double_class();
+    const Code& stub =
+        Code::Handle(StubCode::GetAllocationStubForClass(double_class));
+    const ExternalLabel label(double_class.ToCString(), stub.EntryPoint());
+
+    LocationSummary* locs = instruction_->locs();
+    locs->live_registers()->Remove(locs->out());
+
+    compiler->SaveLiveRegisters(locs);
+    compiler->GenerateCall(Scanner::kNoSourcePos,  // No token position.
+                           &label,
+                           PcDescriptors::kOther,
+                           locs);
+    __ MoveRegister(locs->out().reg(), R0);
+    compiler->RestoreLiveRegisters(locs);
+
+    __ b(exit_label());
+  }
+
+ private:
+  BoxDoubleInstr* instruction_;
+};
+
+
 LocationSummary* BoxDoubleInstr::MakeLocationSummary(bool opt) const {
   const intptr_t kNumInputs = 1;
   const intptr_t kNumTemps = 1;
   LocationSummary* summary =
       new LocationSummary(kNumInputs,
                           kNumTemps,
                           LocationSummary::kCallOnSlowPath);
   summary->set_in(0, Location::RequiresFpuRegister());
   summary->set_temp(0, Location::RequiresRegister());
   summary->set_out(Location::RequiresRegister());
(...skipping 2050 matching lines...)
   compiler->GenerateCall(token_pos(),
                          &label,
                          PcDescriptors::kOther,
                          locs());
   __ Drop(2);  // Discard type arguments and receiver.
 }

 }  // namespace dart

 #endif  // defined TARGET_ARCH_ARM