Chromium Code Reviews

Unified Diff: runtime/vm/intermediate_language_x64.cc

Issue 136753012: Refactor unboxed fields in preparation for reusable SIMD boxes (Closed) Base URL: https://dart.googlecode.com/svn/branches/bleeding_edge/dart
Patch Set: Created 6 years, 10 months ago
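
At its core, this change stops hard-wiring the Double box class into the unboxed-field fast paths: StoreInstanceFieldSlowPath now takes the box class as a constructor argument, and the load/store emitters dispatch on the field's unboxed class id. A minimal sketch of that dispatch, with the caveat that BoxClassFor is a hypothetical helper invented here for illustration (kDoubleCid, UNREACHABLE(), and FlowGraphCompiler::double_class() are the VM's own names):

  // Hypothetical helper illustrating the pattern this CL introduces:
  // choose the box class from Field::UnboxedFieldCid() rather than
  // assuming every unboxed field holds a double.
  static const Class* BoxClassFor(intptr_t cid, FlowGraphCompiler* compiler) {
    switch (cid) {
      case kDoubleCid:
        return &compiler->double_class();
      // A kFloat32x4Cid case slots in here once SIMD boxes are reusable;
      // see the TODO(johnmccutchan) markers in the diff below.
      default:
        UNREACHABLE();
        return NULL;
    }
  }

The slow path then allocates via StubCode::GetAllocationStubForClass(cls_) for whichever class it was handed, so one slow-path class can serve doubles today and SIMD boxes later.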
 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
 // for details. All rights reserved. Use of this source code is governed by a
 // BSD-style license that can be found in the LICENSE file.

 #include "vm/globals.h"  // Needed here to get TARGET_ARCH_X64.
 #if defined(TARGET_ARCH_X64)

 #include "vm/intermediate_language.h"

 #include "vm/dart_entry.h"
(...skipping 1474 matching lines...)
         UNREACHABLE();
       }
     }
   }
   __ Bind(&ok);
 }


 class StoreInstanceFieldSlowPath : public SlowPathCode {
  public:
-  explicit StoreInstanceFieldSlowPath(StoreInstanceFieldInstr* instruction)
-      : instruction_(instruction) { }
+  explicit StoreInstanceFieldSlowPath(const Class& cls,
+                                      StoreInstanceFieldInstr* instruction)
+      : instruction_(instruction), cls_(cls) { }

   virtual void EmitNativeCode(FlowGraphCompiler* compiler) {
     __ Comment("StoreInstanceFieldSlowPath");
     __ Bind(entry_label());
-    const Class& double_class = compiler->double_class();
     const Code& stub =
-        Code::Handle(StubCode::GetAllocationStubForClass(double_class));
-    const ExternalLabel label(double_class.ToCString(), stub.EntryPoint());
+        Code::Handle(StubCode::GetAllocationStubForClass(cls_));
+    const ExternalLabel label(cls_.ToCString(), stub.EntryPoint());

     LocationSummary* locs = instruction_->locs();
     locs->live_registers()->Remove(locs->out());

     compiler->SaveLiveRegisters(locs);
     compiler->GenerateCall(Scanner::kNoSourcePos,  // No token position.
                            &label,
                            PcDescriptors::kOther,
                            locs);
     __ MoveRegister(locs->temp(0).reg(), RAX);
     compiler->RestoreLiveRegisters(locs);

     __ jmp(exit_label());
   }

  private:
   StoreInstanceFieldInstr* instruction_;
+  const Class& cls_;
 };


 LocationSummary* StoreInstanceFieldInstr::MakeLocationSummary(bool opt) const {
   const intptr_t kNumInputs = 2;
   const intptr_t kNumTemps = 0;
   LocationSummary* summary =
       new LocationSummary(kNumInputs, kNumTemps,
           (field().guarded_cid() == kIllegalCid) || (is_initialization_)
           ? LocationSummary::kCallOnSlowPath
(...skipping 23 matching lines...)

 void StoreInstanceFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
   Label skip_store;

   Register instance_reg = locs()->in(0).reg();

   if (IsUnboxedStore() && compiler->is_optimizing()) {
     XmmRegister value = locs()->in(1).fpu_reg();
     Register temp = locs()->temp(0).reg();
     Register temp2 = locs()->temp(1).reg();
+    const intptr_t cid = field().UnboxedFieldCid();

     if (is_initialization_) {
+      const Class* cls = NULL;
+      switch (cid) {
+        case kDoubleCid:
+          cls = &compiler->double_class();
+          break;
+        // TODO(johnmccutchan): Add kFloat32x4Cid here.
+        default:
+          UNREACHABLE();
+      }
       StoreInstanceFieldSlowPath* slow_path =
-          new StoreInstanceFieldSlowPath(this);
+          new StoreInstanceFieldSlowPath(*cls, this);
       compiler->AddSlowPathCode(slow_path);

-      __ TryAllocate(compiler->double_class(),
+      __ TryAllocate(*cls,
                      slow_path->entry_label(),
                      Assembler::kFarJump,
                      temp,
                      PP);
       __ Bind(slow_path->exit_label());
       __ movq(temp2, temp);
       __ StoreIntoObject(instance_reg,
                          FieldAddress(instance_reg, field().Offset()),
                          temp2);
     } else {
       __ movq(temp, FieldAddress(instance_reg, field().Offset()));
     }
-    __ movsd(FieldAddress(temp, Double::value_offset()), value);
+    switch (cid) {
+      case kDoubleCid:
+        __ movsd(FieldAddress(temp, Double::value_offset()), value);
+        // TODO(johnmccutchan): Add kFloat32x4Cid here.
+        break;
+      default:
+        UNREACHABLE();
+    }
     return;
   }

   if (IsPotentialUnboxedStore()) {
     Register value_reg = locs()->in(1).reg();
     Register temp = locs()->temp(0).reg();
     Register temp2 = locs()->temp(1).reg();
     FpuRegister fpu_temp = locs()->temp(2).fpu_reg();

-    Label store_pointer, copy_payload;
+    Label store_pointer;
+    Label copy_double;
+    Label store_double;
+
     __ LoadObject(temp, Field::ZoneHandle(field().raw()), PP);
-    __ cmpq(FieldAddress(temp, Field::guarded_cid_offset()),
-            Immediate(kDoubleCid));
-    __ j(NOT_EQUAL, &store_pointer);
+
     __ cmpq(FieldAddress(temp, Field::is_nullable_offset()),
             Immediate(kNullCid));
     __ j(EQUAL, &store_pointer);
+
     __ movzxb(temp2, FieldAddress(temp, Field::kind_bits_offset()));
     __ testq(temp2, Immediate(1 << Field::kUnboxingCandidateBit));
     __ j(ZERO, &store_pointer);

+    __ cmpq(FieldAddress(temp, Field::guarded_cid_offset()),
+            Immediate(kDoubleCid));
+    __ j(EQUAL, &store_double);
+
+    // Fall through.
+    __ jmp(&store_pointer);
+
+    __ Bind(&store_double);
+
     __ movq(temp, FieldAddress(instance_reg, field().Offset()));
     __ CompareObject(temp, Object::null_object(), PP);
-    __ j(NOT_EQUAL, &copy_payload);
+    __ j(NOT_EQUAL, &copy_double);

     StoreInstanceFieldSlowPath* slow_path =
-        new StoreInstanceFieldSlowPath(this);
+        new StoreInstanceFieldSlowPath(compiler->double_class(), this);
     compiler->AddSlowPathCode(slow_path);

     if (!compiler->is_optimizing()) {
       locs()->live_registers()->Add(locs()->in(0));
       locs()->live_registers()->Add(locs()->in(1));
     }
     __ TryAllocate(compiler->double_class(),
                    slow_path->entry_label(),
                    Assembler::kFarJump,
                    temp,
                    PP);
     __ Bind(slow_path->exit_label());
     __ movq(temp2, temp);
     __ StoreIntoObject(instance_reg,
                        FieldAddress(instance_reg, field().Offset()),
                        temp2);

-    __ Bind(&copy_payload);
+    __ Bind(&copy_double);
     __ movsd(fpu_temp, FieldAddress(value_reg, Double::value_offset()));
     __ movsd(FieldAddress(temp, Double::value_offset()), fpu_temp);
     __ jmp(&skip_store);
     __ Bind(&store_pointer);
   }

   if (ShouldEmitStoreBarrier()) {
     Register value_reg = locs()->in(1).reg();
     __ StoreIntoObject(instance_reg,
                        FieldAddress(instance_reg, field().Offset()),
(...skipping 184 matching lines...)
   return locs;
 }


 void LoadFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
   Register instance_reg = locs()->in(0).reg();
   if (IsUnboxedLoad() && compiler->is_optimizing()) {
     XmmRegister result = locs()->out().fpu_reg();
     Register temp = locs()->temp(0).reg();
     __ movq(temp, FieldAddress(instance_reg, offset_in_bytes()));
-    __ movsd(result, FieldAddress(temp, Double::value_offset()));
+    intptr_t cid = field()->UnboxedFieldCid();
+    switch (cid) {
+      case kDoubleCid:
+        __ movsd(result, FieldAddress(temp, Double::value_offset()));
+        break;
+      // TODO(johnmccutchan): Add Float32x4 path here.
+      default:
+        UNREACHABLE();
+    }
     return;
   }

   Label done;
   Register result = locs()->out().reg();
   if (IsPotentialUnboxedLoad()) {
     Register temp = locs()->temp(1).reg();
     XmmRegister value = locs()->temp(0).fpu_reg();

     Label load_pointer;
+    Label load_double;
+
     __ LoadObject(result, Field::ZoneHandle(field()->raw()), PP);

-
-    __ cmpq(FieldAddress(result, Field::guarded_cid_offset()),
-            Immediate(kDoubleCid));
-    __ j(NOT_EQUAL, &load_pointer);
     __ cmpq(FieldAddress(result, Field::is_nullable_offset()),
             Immediate(kNullCid));
     __ j(EQUAL, &load_pointer);

+    __ cmpq(FieldAddress(result, Field::guarded_cid_offset()),
+            Immediate(kDoubleCid));
+    __ j(EQUAL, &load_double);
+
+    // Fall through.
+    __ jmp(&load_pointer);
+
+    __ Bind(&load_double);
+
     BoxDoubleSlowPath* slow_path = new BoxDoubleSlowPath(this);
     compiler->AddSlowPathCode(slow_path);

     if (!compiler->is_optimizing()) {
       locs()->live_registers()->Add(locs()->in(0));
     }

     __ TryAllocate(compiler->double_class(),
                    slow_path->entry_label(),
                    Assembler::kFarJump,
                    result,
                    PP);
     __ Bind(slow_path->exit_label());
     __ movq(temp, FieldAddress(instance_reg, offset_in_bytes()));
     __ movsd(value, FieldAddress(temp, Double::value_offset()));
     __ movsd(FieldAddress(result, Double::value_offset()), value);
     __ jmp(&done);
+
+    // TODO(johnmccutchan): Add Float32x4 path here.
+
     __ Bind(&load_pointer);
   }
   __ movq(result, FieldAddress(instance_reg, offset_in_bytes()));
   __ Bind(&done);
 }


 LocationSummary* InstantiateTypeInstr::MakeLocationSummary(bool opt) const {
   const intptr_t kNumInputs = 1;
   const intptr_t kNumTemps = 0;
(...skipping 3071 matching lines...)
                          PcDescriptors::kOther,
                          locs());
   __ Drop(2);  // Discard type arguments and receiver.
 }

 }  // namespace dart

 #undef __

 #endif  // defined TARGET_ARCH_X64
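
The TODO(johnmccutchan) markers above reserve the spots where Float32x4 support will land. As a hedged sketch only (not part of this patch), the unboxed-load switch might eventually grow a second case along these lines; the use of Float32x4::value_offset() and the assembler's movups here is an assumption about the follow-up, based on how the VM moves 16-byte SIMD values elsewhere:

  switch (cid) {
    case kDoubleCid:
      __ movsd(result, FieldAddress(temp, Double::value_offset()));
      break;
    case kFloat32x4Cid:  // Hypothetical follow-up case, not in this CL.
      __ movups(result, FieldAddress(temp, Float32x4::value_offset()));
      break;
    default:
      UNREACHABLE();
  }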