Chromium Code Reviews

Unified Diff: runtime/vm/intermediate_language_arm.cc

Issue 136753012: Refactor unboxed fields in preparation of reusable SIMD boxes (Closed)
Base URL: https://dart.googlecode.com/svn/branches/bleeding_edge/dart
Patch Set: Created 6 years, 10 months ago
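
In outline, this patch stops hard-coding the Double class in the field-store slow path: the slow path now receives the box Class as a constructor argument, and the store/load fast paths dispatch on the field's unboxed class id, leaving TODO hooks where kFloat32x4Cid will join kDoubleCid. The core pattern, condensed from the diff below (not text from the CL itself):

    const intptr_t cid = field().UnboxedFieldCid();
    const Class* cls = NULL;
    switch (cid) {
      case kDoubleCid:
        cls = &compiler->double_class();
        break;
      // TODO(johnmccutchan): Add kFloat32x4Cid here.
      default:
        UNREACHABLE();
    }
    StoreInstanceFieldSlowPath* slow_path =
        new StoreInstanceFieldSlowPath(*cls, this);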
 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
 // for details. All rights reserved. Use of this source code is governed by a
 // BSD-style license that can be found in the LICENSE file.

 #include "vm/globals.h"  // Needed here to get TARGET_ARCH_ARM.
 #if defined(TARGET_ARCH_ARM)

 #include "vm/intermediate_language.h"

 #include "vm/dart_entry.h"
(...skipping 1565 matching lines...)
         UNREACHABLE();
       }
     }
   }
   __ Bind(&ok);
 }


 class StoreInstanceFieldSlowPath : public SlowPathCode {
  public:
-  explicit StoreInstanceFieldSlowPath(StoreInstanceFieldInstr* instruction)
-      : instruction_(instruction) { }
+  explicit StoreInstanceFieldSlowPath(const Class& cls,
+                                      StoreInstanceFieldInstr* instruction)
+      : instruction_(instruction), cls_(cls) { }

srdjan 2014/01/30 23:58:39: Remove explicit. I would put cls as second argument.
Cutch 2014/01/31 00:18:49: Done.
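For reference, a minimal sketch of the constructor shape srdjan is asking for (hypothetical; the patch set shown above still has explicit and takes cls first):

    // Hypothetical shape after the review comment is addressed:
    // 'explicit' dropped and cls moved to the second position.
    StoreInstanceFieldSlowPath(StoreInstanceFieldInstr* instruction,
                               const Class& cls)
        : instruction_(instruction), cls_(cls) { }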
   virtual void EmitNativeCode(FlowGraphCompiler* compiler) {
     __ Comment("StoreInstanceFieldSlowPath");
     __ Bind(entry_label());
-    const Class& double_class = compiler->double_class();
     const Code& stub =
-        Code::Handle(StubCode::GetAllocationStubForClass(double_class));
-    const ExternalLabel label(double_class.ToCString(), stub.EntryPoint());
+        Code::Handle(StubCode::GetAllocationStubForClass(cls_));
+    const ExternalLabel label(cls_.ToCString(), stub.EntryPoint());

     LocationSummary* locs = instruction_->locs();
     locs->live_registers()->Remove(locs->out());

     compiler->SaveLiveRegisters(locs);
     compiler->GenerateCall(Scanner::kNoSourcePos,  // No token position.
                            &label,
                            PcDescriptors::kOther,
                            locs);
     __ MoveRegister(locs->temp(0).reg(), R0);
     compiler->RestoreLiveRegisters(locs);

     __ b(exit_label());
   }

  private:
   StoreInstanceFieldInstr* instruction_;
+  const Class& cls_;
 };


 LocationSummary* StoreInstanceFieldInstr::MakeLocationSummary(bool opt) const {
   const intptr_t kNumInputs = 2;
   const intptr_t kNumTemps = 0;
   LocationSummary* summary =
       new LocationSummary(kNumInputs, kNumTemps,
           (field().guarded_cid() == kIllegalCid) || (is_initialization_)
           ? LocationSummary::kCallOnSlowPath
(...skipping 23 matching lines...)

 void StoreInstanceFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
   Label skip_store;

   Register instance_reg = locs()->in(0).reg();

   if (IsUnboxedStore() && compiler->is_optimizing()) {
     DRegister value = EvenDRegisterOf(locs()->in(1).fpu_reg());
     Register temp = locs()->temp(0).reg();
     Register temp2 = locs()->temp(1).reg();
+    const intptr_t cid = field().UnboxedFieldCid();

     if (is_initialization_) {
+      const Class* cls = NULL;
+      switch (cid) {
+        case kDoubleCid:
+          cls = &compiler->double_class();
+          break;
+        // TODO(johnmccutchan): Add kFloat32x4Cid here.
+        default:
+          UNREACHABLE();
+      }
       StoreInstanceFieldSlowPath* slow_path =
-          new StoreInstanceFieldSlowPath(this);
+          new StoreInstanceFieldSlowPath(*cls, this);
       compiler->AddSlowPathCode(slow_path);
-      __ TryAllocate(compiler->double_class(),
+      __ TryAllocate(*cls,
                      slow_path->entry_label(),
                      temp,
                      temp2);
       __ Bind(slow_path->exit_label());
       __ MoveRegister(temp2, temp);
       __ StoreIntoObject(instance_reg,
                          FieldAddress(instance_reg, field().Offset()),
                          temp2);
     } else {
       __ ldr(temp, FieldAddress(instance_reg, field().Offset()));
     }
-    __ StoreDToOffset(value, temp, Double::value_offset() - kHeapObjectTag);
+    switch (cid) {
+      case kDoubleCid:
+        __ StoreDToOffset(value, temp, Double::value_offset() - kHeapObjectTag);
+        // TODO(johnmccutchan): Add kFloat32x4Cid here.
+        break;
+      default:
+        UNREACHABLE();
+    }
+
     return;
   }
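The TODO markers above show where SIMD boxing will slot in. A hypothetical sketch of the kFloat32x4Cid case for the class-selection switch (assuming a compiler->float32x4_class() accessor; none of this is in the patch):

    switch (cid) {
      case kDoubleCid:
        cls = &compiler->double_class();
        break;
      case kFloat32x4Cid:  // Hypothetical: select the Float32x4 box class.
        cls = &compiler->float32x4_class();  // Assumed accessor.
        break;
      default:
        UNREACHABLE();
    }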

   if (IsPotentialUnboxedStore()) {
     Register value_reg = locs()->in(1).reg();
     Register temp = locs()->temp(0).reg();
     Register temp2 = locs()->temp(1).reg();
     DRegister fpu_temp = EvenDRegisterOf(locs()->temp(2).fpu_reg());

-    Label store_pointer, copy_payload;
+    Label store_pointer;
+    Label copy_double;
+    Label store_double;
+
     __ LoadObject(temp, Field::ZoneHandle(field().raw()));
-    __ ldr(temp2, FieldAddress(temp, Field::guarded_cid_offset()));
-    __ CompareImmediate(temp2, kDoubleCid);
-    __ b(&store_pointer, NE);
+
     __ ldr(temp2, FieldAddress(temp, Field::is_nullable_offset()));
     __ CompareImmediate(temp2, kNullCid);
     __ b(&store_pointer, EQ);
+
     __ ldrb(temp2, FieldAddress(temp, Field::kind_bits_offset()));
     __ tst(temp2, ShifterOperand(1 << Field::kUnboxingCandidateBit));
     __ b(&store_pointer, EQ);

+    __ ldr(temp2, FieldAddress(temp, Field::guarded_cid_offset()));
+    __ CompareImmediate(temp2, kDoubleCid);
+    __ b(&store_double, EQ);
+
+    // Fall through.
+    __ b(&store_pointer);
+
+    __ Bind(&store_double);
+
     __ ldr(temp, FieldAddress(instance_reg, field().Offset()));
     __ CompareImmediate(temp,
                         reinterpret_cast<intptr_t>(Object::null()));
-    __ b(&copy_payload, NE);
+    __ b(&copy_double, NE);

     StoreInstanceFieldSlowPath* slow_path =
-        new StoreInstanceFieldSlowPath(this);
+        new StoreInstanceFieldSlowPath(compiler->double_class(), this);
     compiler->AddSlowPathCode(slow_path);

     if (!compiler->is_optimizing()) {
       locs()->live_registers()->Add(locs()->in(0));
       locs()->live_registers()->Add(locs()->in(1));
     }

     __ TryAllocate(compiler->double_class(),
                    slow_path->entry_label(),
                    temp,
                    temp2);
     __ Bind(slow_path->exit_label());
     __ MoveRegister(temp2, temp);
     __ StoreIntoObject(instance_reg,
                        FieldAddress(instance_reg, field().Offset()),
                        temp2);
-    __ Bind(&copy_payload);
+    __ Bind(&copy_double);
     __ LoadDFromOffset(fpu_temp,
                        value_reg,
                        Double::value_offset() - kHeapObjectTag);
     __ StoreDToOffset(fpu_temp, temp, Double::value_offset() - kHeapObjectTag);
     __ b(&skip_store);
     __ Bind(&store_pointer);
   }
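With the guarded-cid test now last and the fall-through made explicit, further unboxed cids can be chained in front of the final branch to store_pointer. A hypothetical extension (kFloat32x4Cid handling and the store_float32x4 label are not part of this patch):

    __ ldr(temp2, FieldAddress(temp, Field::guarded_cid_offset()));
    __ CompareImmediate(temp2, kDoubleCid);
    __ b(&store_double, EQ);
    __ CompareImmediate(temp2, kFloat32x4Cid);  // Hypothetical next case.
    __ b(&store_float32x4, EQ);                 // Hypothetical label.
    __ b(&store_pointer);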

   if (ShouldEmitStoreBarrier()) {
     Register value_reg = locs()->in(1).reg();
(...skipping 189 matching lines...)
   return locs;
 }


 void LoadFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
   Register instance_reg = locs()->in(0).reg();
   if (IsUnboxedLoad() && compiler->is_optimizing()) {
     DRegister result = EvenDRegisterOf(locs()->out().fpu_reg());
     Register temp = locs()->temp(0).reg();
     __ ldr(temp, FieldAddress(instance_reg, offset_in_bytes()));
-    __ LoadDFromOffset(result, temp, Double::value_offset() - kHeapObjectTag);
+    intptr_t cid = field()->UnboxedFieldCid();
+    switch (cid) {
+      case kDoubleCid:
+        __ LoadDFromOffset(result, temp,
+                           Double::value_offset() - kHeapObjectTag);
+        break;
+      // TODO(johnmccutchan): Add Float32x4 path here.
+      default:
+        UNREACHABLE();
+    }
     return;
   }

   Label done;
   Register result_reg = locs()->out().reg();
   if (IsPotentialUnboxedLoad()) {
     Register temp = locs()->temp(1).reg();
     DRegister value = EvenDRegisterOf(locs()->temp(0).fpu_reg());

     Label load_pointer;
+    Label load_double;
+
     __ LoadObject(result_reg, Field::ZoneHandle(field()->raw()));

     FieldAddress field_cid_operand(result_reg, Field::guarded_cid_offset());
     FieldAddress field_nullability_operand(result_reg,
                                            Field::is_nullable_offset());

-    __ ldr(temp, field_cid_operand);
-    __ CompareImmediate(temp, kDoubleCid);
-    __ b(&load_pointer, NE);
-
     __ ldr(temp, field_nullability_operand);
     __ CompareImmediate(temp, kNullCid);
     __ b(&load_pointer, EQ);

+    __ ldr(temp, field_cid_operand);
+    __ CompareImmediate(temp, kDoubleCid);
+    __ b(&load_double, EQ);
+
+    // Fall through.
+    __ b(&load_pointer);
+
+    __ Bind(&load_double);
+
     BoxDoubleSlowPath* slow_path = new BoxDoubleSlowPath(this);
     compiler->AddSlowPathCode(slow_path);

     if (!compiler->is_optimizing()) {
       locs()->live_registers()->Add(locs()->in(0));
     }

     __ TryAllocate(compiler->double_class(),
                    slow_path->entry_label(),
                    result_reg,
                    temp);
     __ Bind(slow_path->exit_label());
     __ ldr(temp, FieldAddress(instance_reg, offset_in_bytes()));
     __ LoadDFromOffset(value, temp, Double::value_offset() - kHeapObjectTag);
     __ StoreDToOffset(value,
                       result_reg,
                       Double::value_offset() - kHeapObjectTag);
     __ b(&done);
+
+    // TODO(johnmccutchan): Add Float32x4 path here.
+
     __ Bind(&load_pointer);
   }
   __ LoadFromOffset(kWord, result_reg,
                     instance_reg, offset_in_bytes() - kHeapObjectTag);
   __ Bind(&done);
 }
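Per the TODO above, a load_float32x4 arm would mirror the double path: allocate a Float32x4 box (falling back to a slow path), then copy the 16-byte payload in two 64-bit halves. A rough hypothetical sketch; BoxFloat32x4SlowPath, compiler->float32x4_class(), and Float32x4::value_offset() are assumed analogues of the double machinery, not confirmed by this patch:

    __ Bind(&load_float32x4);  // Hypothetical label, reached from the cid checks.
    BoxFloat32x4SlowPath* slow_path = new BoxFloat32x4SlowPath(this);  // Assumed.
    compiler->AddSlowPathCode(slow_path);
    __ TryAllocate(compiler->float32x4_class(),  // Assumed accessor.
                   slow_path->entry_label(),
                   result_reg,
                   temp);
    __ Bind(slow_path->exit_label());
    __ ldr(temp, FieldAddress(instance_reg, offset_in_bytes()));
    // Copy the 128-bit payload as two D-register-sized halves.
    __ LoadDFromOffset(value, temp,
                       Float32x4::value_offset() - kHeapObjectTag);
    __ StoreDToOffset(value, result_reg,
                      Float32x4::value_offset() - kHeapObjectTag);
    __ LoadDFromOffset(value, temp,
                       Float32x4::value_offset() + 8 - kHeapObjectTag);
    __ StoreDToOffset(value, result_reg,
                      Float32x4::value_offset() + 8 - kHeapObjectTag);
    __ b(&done);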


 LocationSummary* InstantiateTypeInstr::MakeLocationSummary(bool opt) const {
   const intptr_t kNumInputs = 1;
(...skipping 2856 matching lines...)
   compiler->GenerateCall(token_pos(),
                          &label,
                          PcDescriptors::kOther,
                          locs());
   __ Drop(2);  // Discard type arguments and receiver.
 }

 }  // namespace dart

 #endif  // defined TARGET_ARCH_ARM