Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(48)

Side by Side Diff: runtime/vm/intermediate_language_x64.cc

Issue 2895183002: More compact string representation on 64 bit. (Closed)
Patch Set: Created 3 years, 7 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « runtime/vm/intermediate_language_arm64.cc ('k') | runtime/vm/intrinsifier_arm.cc » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file 1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
2 // for details. All rights reserved. Use of this source code is governed by a 2 // for details. All rights reserved. Use of this source code is governed by a
3 // BSD-style license that can be found in the LICENSE file. 3 // BSD-style license that can be found in the LICENSE file.
4 4
5 #include "vm/globals.h" // Needed here to get TARGET_ARCH_X64. 5 #include "vm/globals.h" // Needed here to get TARGET_ARCH_X64.
6 #if defined(TARGET_ARCH_X64) 6 #if defined(TARGET_ARCH_X64)
7 7
8 #include "vm/intermediate_language.h" 8 #include "vm/intermediate_language.h"
9 9
10 #include "vm/compiler.h" 10 #include "vm/compiler.h"
(...skipping 1481 matching lines...) Expand 10 before | Expand all | Expand 10 after
1492 for (intptr_t i = 0; i < num_temps; i++) { 1492 for (intptr_t i = 0; i < num_temps; i++) {
1493 summary->set_temp(i, Location::RequiresRegister()); 1493 summary->set_temp(i, Location::RequiresRegister());
1494 } 1494 }
1495 1495
1496 1496
1497 return summary; 1497 return summary;
1498 } 1498 }
1499 1499
1500 1500
1501 void GuardFieldClassInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 1501 void GuardFieldClassInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
1502 ASSERT(sizeof(classid_t) == kInt32Size); 1502 ASSERT(sizeof(classid_t) == kInt16Size);
1503 const intptr_t value_cid = value()->Type()->ToCid(); 1503 const intptr_t value_cid = value()->Type()->ToCid();
1504 const intptr_t field_cid = field().guarded_cid(); 1504 const intptr_t field_cid = field().guarded_cid();
1505 const intptr_t nullability = field().is_nullable() ? kNullCid : kIllegalCid; 1505 const intptr_t nullability = field().is_nullable() ? kNullCid : kIllegalCid;
1506 1506
1507 if (field_cid == kDynamicCid) { 1507 if (field_cid == kDynamicCid) {
1508 if (Compiler::IsBackgroundCompilation()) { 1508 if (Compiler::IsBackgroundCompilation()) {
1509 // Field state changed while compiling. 1509 // Field state changed while compiling.
1510 Compiler::AbortBackgroundCompilation( 1510 Compiler::AbortBackgroundCompilation(
1511 deopt_id(), 1511 deopt_id(),
1512 "GuardFieldClassInstr: field state changed while compiling"); 1512 "GuardFieldClassInstr: field state changed while compiling");
(...skipping 31 matching lines...) Expand 10 before | Expand all | Expand 10 after
1544 if (emit_full_guard) { 1544 if (emit_full_guard) {
1545 __ LoadObject(field_reg, Field::ZoneHandle(field().Original())); 1545 __ LoadObject(field_reg, Field::ZoneHandle(field().Original()));
1546 1546
1547 FieldAddress field_cid_operand(field_reg, Field::guarded_cid_offset()); 1547 FieldAddress field_cid_operand(field_reg, Field::guarded_cid_offset());
1548 FieldAddress field_nullability_operand(field_reg, 1548 FieldAddress field_nullability_operand(field_reg,
1549 Field::is_nullable_offset()); 1549 Field::is_nullable_offset());
1550 1550
1551 if (value_cid == kDynamicCid) { 1551 if (value_cid == kDynamicCid) {
1552 LoadValueCid(compiler, value_cid_reg, value_reg); 1552 LoadValueCid(compiler, value_cid_reg, value_reg);
1553 1553
1554 __ cmpl(value_cid_reg, field_cid_operand); 1554 __ cmpw(value_cid_reg, field_cid_operand);
1555 __ j(EQUAL, &ok); 1555 __ j(EQUAL, &ok);
1556 __ cmpl(value_cid_reg, field_nullability_operand); 1556 __ cmpw(value_cid_reg, field_nullability_operand);
1557 } else if (value_cid == kNullCid) { 1557 } else if (value_cid == kNullCid) {
1558 __ cmpl(field_nullability_operand, Immediate(value_cid)); 1558 __ cmpw(field_nullability_operand, Immediate(value_cid));
1559 } else { 1559 } else {
1560 __ cmpl(field_cid_operand, Immediate(value_cid)); 1560 __ cmpw(field_cid_operand, Immediate(value_cid));
1561 } 1561 }
1562 __ j(EQUAL, &ok); 1562 __ j(EQUAL, &ok);
1563 1563
1564 // Check if the tracked state of the guarded field can be initialized 1564 // Check if the tracked state of the guarded field can be initialized
1565 // inline. If the field needs length check we fall through to runtime 1565 // inline. If the field needs length check we fall through to runtime
1566 // which is responsible for computing offset of the length field 1566 // which is responsible for computing offset of the length field
1567 // based on the class id. 1567 // based on the class id.
1568 if (!field().needs_length_check()) { 1568 if (!field().needs_length_check()) {
1569 // Uninitialized field can be handled inline. Check if the 1569 // Uninitialized field can be handled inline. Check if the
1570 // field is still uninitialized. 1570 // field is still uninitialized.
1571 __ cmpl(field_cid_operand, Immediate(kIllegalCid)); 1571 __ cmpw(field_cid_operand, Immediate(kIllegalCid));
1572 __ j(NOT_EQUAL, fail); 1572 __ j(NOT_EQUAL, fail);
1573 1573
1574 if (value_cid == kDynamicCid) { 1574 if (value_cid == kDynamicCid) {
1575 __ movl(field_cid_operand, value_cid_reg); 1575 __ movw(field_cid_operand, value_cid_reg);
1576 __ movl(field_nullability_operand, value_cid_reg); 1576 __ movw(field_nullability_operand, value_cid_reg);
1577 } else { 1577 } else {
1578 ASSERT(field_reg != kNoRegister); 1578 ASSERT(field_reg != kNoRegister);
1579 __ movl(field_cid_operand, Immediate(value_cid)); 1579 __ movw(field_cid_operand, Immediate(value_cid));
1580 __ movl(field_nullability_operand, Immediate(value_cid)); 1580 __ movw(field_nullability_operand, Immediate(value_cid));
1581 } 1581 }
1582 1582
1583 if (deopt == NULL) { 1583 if (deopt == NULL) {
1584 ASSERT(!compiler->is_optimizing()); 1584 ASSERT(!compiler->is_optimizing());
1585 __ jmp(&ok); 1585 __ jmp(&ok);
1586 } 1586 }
1587 } 1587 }
1588 1588
1589 if (deopt == NULL) { 1589 if (deopt == NULL) {
1590 ASSERT(!compiler->is_optimizing()); 1590 ASSERT(!compiler->is_optimizing());
1591 __ Bind(fail); 1591 __ Bind(fail);
1592 1592
1593 __ cmpl(FieldAddress(field_reg, Field::guarded_cid_offset()), 1593 __ cmpw(FieldAddress(field_reg, Field::guarded_cid_offset()),
1594 Immediate(kDynamicCid)); 1594 Immediate(kDynamicCid));
1595 __ j(EQUAL, &ok); 1595 __ j(EQUAL, &ok);
1596 1596
1597 __ pushq(field_reg); 1597 __ pushq(field_reg);
1598 __ pushq(value_reg); 1598 __ pushq(value_reg);
1599 __ CallRuntime(kUpdateFieldCidRuntimeEntry, 2); 1599 __ CallRuntime(kUpdateFieldCidRuntimeEntry, 2);
1600 __ Drop(2); // Drop the field and the value. 1600 __ Drop(2); // Drop the field and the value.
1601 } 1601 }
1602 } else { 1602 } else {
1603 ASSERT(compiler->is_optimizing()); 1603 ASSERT(compiler->is_optimizing());
(...skipping 222 matching lines...) Expand 10 before | Expand all | Expand 10 after
1826 __ j(NOT_EQUAL, &done); 1826 __ j(NOT_EQUAL, &done);
1827 BoxAllocationSlowPath::Allocate(compiler, instruction, cls, box_reg, temp); 1827 BoxAllocationSlowPath::Allocate(compiler, instruction, cls, box_reg, temp);
1828 __ movq(temp, box_reg); 1828 __ movq(temp, box_reg);
1829 __ StoreIntoObject(instance_reg, FieldAddress(instance_reg, offset), temp); 1829 __ StoreIntoObject(instance_reg, FieldAddress(instance_reg, offset), temp);
1830 1830
1831 __ Bind(&done); 1831 __ Bind(&done);
1832 } 1832 }
1833 1833
1834 1834
1835 void StoreInstanceFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 1835 void StoreInstanceFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
1836 ASSERT(sizeof(classid_t) == kInt32Size); 1836 ASSERT(sizeof(classid_t) == kInt16Size);
1837 Label skip_store; 1837 Label skip_store;
1838 1838
1839 Register instance_reg = locs()->in(0).reg(); 1839 Register instance_reg = locs()->in(0).reg();
1840 1840
1841 if (IsUnboxedStore() && compiler->is_optimizing()) { 1841 if (IsUnboxedStore() && compiler->is_optimizing()) {
1842 XmmRegister value = locs()->in(1).fpu_reg(); 1842 XmmRegister value = locs()->in(1).fpu_reg();
1843 Register temp = locs()->temp(0).reg(); 1843 Register temp = locs()->temp(0).reg();
1844 Register temp2 = locs()->temp(1).reg(); 1844 Register temp2 = locs()->temp(1).reg();
1845 const intptr_t cid = field().UnboxedFieldCid(); 1845 const intptr_t cid = field().UnboxedFieldCid();
1846 1846
(...skipping 51 matching lines...) Expand 10 before | Expand all | Expand 10 after
1898 locs()->live_registers()->Add(locs()->in(1), kTagged); 1898 locs()->live_registers()->Add(locs()->in(1), kTagged);
1899 } 1899 }
1900 1900
1901 Label store_pointer; 1901 Label store_pointer;
1902 Label store_double; 1902 Label store_double;
1903 Label store_float32x4; 1903 Label store_float32x4;
1904 Label store_float64x2; 1904 Label store_float64x2;
1905 1905
1906 __ LoadObject(temp, Field::ZoneHandle(Z, field().Original())); 1906 __ LoadObject(temp, Field::ZoneHandle(Z, field().Original()));
1907 1907
1908 __ cmpl(FieldAddress(temp, Field::is_nullable_offset()), 1908 __ cmpw(FieldAddress(temp, Field::is_nullable_offset()),
1909 Immediate(kNullCid)); 1909 Immediate(kNullCid));
1910 __ j(EQUAL, &store_pointer); 1910 __ j(EQUAL, &store_pointer);
1911 1911
1912 __ movzxb(temp2, FieldAddress(temp, Field::kind_bits_offset())); 1912 __ movzxb(temp2, FieldAddress(temp, Field::kind_bits_offset()));
1913 __ testq(temp2, Immediate(1 << Field::kUnboxingCandidateBit)); 1913 __ testq(temp2, Immediate(1 << Field::kUnboxingCandidateBit));
1914 __ j(ZERO, &store_pointer); 1914 __ j(ZERO, &store_pointer);
1915 1915
1916 __ cmpl(FieldAddress(temp, Field::guarded_cid_offset()), 1916 __ cmpw(FieldAddress(temp, Field::guarded_cid_offset()),
1917 Immediate(kDoubleCid)); 1917 Immediate(kDoubleCid));
1918 __ j(EQUAL, &store_double); 1918 __ j(EQUAL, &store_double);
1919 1919
1920 __ cmpl(FieldAddress(temp, Field::guarded_cid_offset()), 1920 __ cmpw(FieldAddress(temp, Field::guarded_cid_offset()),
1921 Immediate(kFloat32x4Cid)); 1921 Immediate(kFloat32x4Cid));
1922 __ j(EQUAL, &store_float32x4); 1922 __ j(EQUAL, &store_float32x4);
1923 1923
1924 __ cmpl(FieldAddress(temp, Field::guarded_cid_offset()), 1924 __ cmpw(FieldAddress(temp, Field::guarded_cid_offset()),
1925 Immediate(kFloat64x2Cid)); 1925 Immediate(kFloat64x2Cid));
1926 __ j(EQUAL, &store_float64x2); 1926 __ j(EQUAL, &store_float64x2);
1927 1927
1928 // Fall through. 1928 // Fall through.
1929 __ jmp(&store_pointer); 1929 __ jmp(&store_pointer);
1930 1930
1931 if (!compiler->is_optimizing()) { 1931 if (!compiler->is_optimizing()) {
1932 locs()->live_registers()->Add(locs()->in(0)); 1932 locs()->live_registers()->Add(locs()->in(0));
1933 locs()->live_registers()->Add(locs()->in(1)); 1933 locs()->live_registers()->Add(locs()->in(1));
1934 } 1934 }
(...skipping 249 matching lines...) Expand 10 before | Expand all | Expand 10 after
2184 locs->set_temp(0, opt ? Location::RequiresFpuRegister() 2184 locs->set_temp(0, opt ? Location::RequiresFpuRegister()
2185 : Location::FpuRegisterLocation(XMM1)); 2185 : Location::FpuRegisterLocation(XMM1));
2186 locs->set_temp(1, Location::RequiresRegister()); 2186 locs->set_temp(1, Location::RequiresRegister());
2187 } 2187 }
2188 locs->set_out(0, Location::RequiresRegister()); 2188 locs->set_out(0, Location::RequiresRegister());
2189 return locs; 2189 return locs;
2190 } 2190 }
2191 2191
2192 2192
2193 void LoadFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 2193 void LoadFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2194 ASSERT(sizeof(classid_t) == kInt32Size); 2194 ASSERT(sizeof(classid_t) == kInt16Size);
2195 Register instance_reg = locs()->in(0).reg(); 2195 Register instance_reg = locs()->in(0).reg();
2196 if (IsUnboxedLoad() && compiler->is_optimizing()) { 2196 if (IsUnboxedLoad() && compiler->is_optimizing()) {
2197 XmmRegister result = locs()->out(0).fpu_reg(); 2197 XmmRegister result = locs()->out(0).fpu_reg();
2198 Register temp = locs()->temp(0).reg(); 2198 Register temp = locs()->temp(0).reg();
2199 __ movq(temp, FieldAddress(instance_reg, offset_in_bytes())); 2199 __ movq(temp, FieldAddress(instance_reg, offset_in_bytes()));
2200 intptr_t cid = field()->UnboxedFieldCid(); 2200 intptr_t cid = field()->UnboxedFieldCid();
2201 switch (cid) { 2201 switch (cid) {
2202 case kDoubleCid: 2202 case kDoubleCid:
2203 __ Comment("UnboxedDoubleLoadFieldInstr"); 2203 __ Comment("UnboxedDoubleLoadFieldInstr");
2204 __ movsd(result, FieldAddress(temp, Double::value_offset())); 2204 __ movsd(result, FieldAddress(temp, Double::value_offset()));
(...skipping 18 matching lines...) Expand all
2223 Register temp = locs()->temp(1).reg(); 2223 Register temp = locs()->temp(1).reg();
2224 XmmRegister value = locs()->temp(0).fpu_reg(); 2224 XmmRegister value = locs()->temp(0).fpu_reg();
2225 2225
2226 Label load_pointer; 2226 Label load_pointer;
2227 Label load_double; 2227 Label load_double;
2228 Label load_float32x4; 2228 Label load_float32x4;
2229 Label load_float64x2; 2229 Label load_float64x2;
2230 2230
2231 __ LoadObject(result, Field::ZoneHandle(field()->Original())); 2231 __ LoadObject(result, Field::ZoneHandle(field()->Original()));
2232 2232
2233 __ cmpl(FieldAddress(result, Field::is_nullable_offset()), 2233 FieldAddress field_cid_operand(result, Field::guarded_cid_offset());
2234 Immediate(kNullCid)); 2234 FieldAddress field_nullability_operand(result, Field::is_nullable_offset());
2235
2236 __ cmpw(field_nullability_operand, Immediate(kNullCid));
2235 __ j(EQUAL, &load_pointer); 2237 __ j(EQUAL, &load_pointer);
2236 2238
2237 __ cmpl(FieldAddress(result, Field::guarded_cid_offset()), 2239 __ cmpw(field_cid_operand, Immediate(kDoubleCid));
2238 Immediate(kDoubleCid));
2239 __ j(EQUAL, &load_double); 2240 __ j(EQUAL, &load_double);
2240 2241
2241 __ cmpl(FieldAddress(result, Field::guarded_cid_offset()), 2242 __ cmpw(field_cid_operand, Immediate(kFloat32x4Cid));
2242 Immediate(kFloat32x4Cid));
2243 __ j(EQUAL, &load_float32x4); 2243 __ j(EQUAL, &load_float32x4);
2244 2244
2245 __ cmpl(FieldAddress(result, Field::guarded_cid_offset()), 2245 __ cmpw(field_cid_operand, Immediate(kFloat64x2Cid));
2246 Immediate(kFloat64x2Cid));
2247 __ j(EQUAL, &load_float64x2); 2246 __ j(EQUAL, &load_float64x2);
2248 2247
2249 // Fall through. 2248 // Fall through.
2250 __ jmp(&load_pointer); 2249 __ jmp(&load_pointer);
2251 2250
2252 if (!compiler->is_optimizing()) { 2251 if (!compiler->is_optimizing()) {
2253 locs()->live_registers()->Add(locs()->in(0)); 2252 locs()->live_registers()->Add(locs()->in(0));
2254 } 2253 }
2255 2254
2256 { 2255 {
(...skipping 4520 matching lines...) Expand 10 before | Expand all | Expand 10 after
6777 __ Drop(1); 6776 __ Drop(1);
6778 __ popq(result); 6777 __ popq(result);
6779 } 6778 }
6780 6779
6781 6780
6782 } // namespace dart 6781 } // namespace dart
6783 6782
6784 #undef __ 6783 #undef __
6785 6784
6786 #endif // defined TARGET_ARCH_X64 6785 #endif // defined TARGET_ARCH_X64
OLDNEW
« no previous file with comments | « runtime/vm/intermediate_language_arm64.cc ('k') | runtime/vm/intrinsifier_arm.cc » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698