OLD | NEW |
1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file | 1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file |
2 // for details. All rights reserved. Use of this source code is governed by a | 2 // for details. All rights reserved. Use of this source code is governed by a |
3 // BSD-style license that can be found in the LICENSE file. | 3 // BSD-style license that can be found in the LICENSE file. |
4 | 4 |
5 #include "vm/globals.h" // Needed here to get TARGET_ARCH_X64. | 5 #include "vm/globals.h" // Needed here to get TARGET_ARCH_X64. |
6 #if defined(TARGET_ARCH_X64) | 6 #if defined(TARGET_ARCH_X64) |
7 | 7 |
8 #include "vm/intrinsifier.h" | 8 #include "vm/intrinsifier.h" |
9 | 9 |
10 #include "vm/assembler.h" | 10 #include "vm/assembler.h" |
(...skipping 54 matching lines...) |
65 // This snippet of inlined code uses the following registers: | 65 // This snippet of inlined code uses the following registers: |
66 // RAX, RCX, R13 | 66 // RAX, RCX, R13 |
67 // and the newly allocated object is returned in RAX. | 67 // and the newly allocated object is returned in RAX. |
68 const intptr_t kTypeArgumentsOffset = 2 * kWordSize; | 68 const intptr_t kTypeArgumentsOffset = 2 * kWordSize; |
69 const intptr_t kArrayOffset = 1 * kWordSize; | 69 const intptr_t kArrayOffset = 1 * kWordSize; |
70 Label fall_through; | 70 Label fall_through; |
71 | 71 |
72 // Try allocating in new space. | 72 // Try allocating in new space. |
73 const Class& cls = Class::Handle( | 73 const Class& cls = Class::Handle( |
74 Isolate::Current()->object_store()->growable_object_array_class()); | 74 Isolate::Current()->object_store()->growable_object_array_class()); |
75 __ TryAllocate(cls, &fall_through, Assembler::kFarJump, RAX, kNoRegister); | 75 __ TryAllocate(cls, &fall_through, Assembler::kFarJump, RAX, |
| 76 kNoRegister, // Pool pointer might not be loaded. |
| 77 R13); // temp |
76 | 78 |
77 // Store backing array object in growable array object. | 79 // Store backing array object in growable array object. |
78 __ movq(RCX, Address(RSP, kArrayOffset)); // data argument. | 80 __ movq(RCX, Address(RSP, kArrayOffset)); // data argument. |
79 // RAX is new, no barrier needed. | 81 // RAX is new, no barrier needed. |
80 __ InitializeFieldNoBarrier( | 82 __ InitializeFieldNoBarrier( |
81 RAX, | 83 RAX, |
82 FieldAddress(RAX, GrowableObjectArray::data_offset()), | 84 FieldAddress(RAX, GrowableObjectArray::data_offset()), |
83 RCX); | 85 RCX); |
84 | 86 |
85 // RAX: new growable array object start as a tagged pointer. | 87 // RAX: new growable array object start as a tagged pointer. |
(...skipping 37 matching lines...) |
123 RAX); | 125 RAX); |
124 __ LoadObject(RAX, Object::null_object(), PP); | 126 __ LoadObject(RAX, Object::null_object(), PP); |
125 __ ret(); | 127 __ ret(); |
126 __ Bind(&fall_through); | 128 __ Bind(&fall_through); |
127 } | 129 } |
128 | 130 |
129 | 131 |
130 #define TYPED_ARRAY_ALLOCATION(type_name, cid, max_len, scale_factor) \ | 132 #define TYPED_ARRAY_ALLOCATION(type_name, cid, max_len, scale_factor) \ |
131 Label fall_through; \ | 133 Label fall_through; \ |
132 const intptr_t kArrayLengthStackOffset = 1 * kWordSize; \ | 134 const intptr_t kArrayLengthStackOffset = 1 * kWordSize; \ |
133 __ MaybeTraceAllocation(cid, &fall_through, false); \ | 135 __ MaybeTraceAllocation(cid, &fall_through, false, \ |
| 136 /* inline_isolate = */ false); \ |
134 __ movq(RDI, Address(RSP, kArrayLengthStackOffset)); /* Array length. */ \ | 137 __ movq(RDI, Address(RSP, kArrayLengthStackOffset)); /* Array length. */ \ |
135 /* Check that length is a positive Smi. */ \ | 138 /* Check that length is a positive Smi. */ \ |
136 /* RDI: requested array length argument. */ \ | 139 /* RDI: requested array length argument. */ \ |
137 __ testq(RDI, Immediate(kSmiTagMask)); \ | 140 __ testq(RDI, Immediate(kSmiTagMask)); \ |
138 __ j(NOT_ZERO, &fall_through); \ | 141 __ j(NOT_ZERO, &fall_through); \ |
139 __ cmpq(RDI, Immediate(0)); \ | 142 __ cmpq(RDI, Immediate(0)); \ |
140 __ j(LESS, &fall_through); \ | 143 __ j(LESS, &fall_through); \ |
141 __ SmiUntag(RDI); \ | 144 __ SmiUntag(RDI); \ |
142 /* Check for maximum allowed length. */ \ | 145 /* Check for maximum allowed length. */ \ |
143 /* RDI: untagged array length. */ \ | 146 /* RDI: untagged array length. */ \ |
144 __ cmpq(RDI, Immediate(max_len)); \ | 147 __ cmpq(RDI, Immediate(max_len)); \ |
145 __ j(GREATER, &fall_through); \ | 148 __ j(GREATER, &fall_through); \ |
146 /* Special case for scaling by 16. */ \ | 149 /* Special case for scaling by 16. */ \ |
147 if (scale_factor == TIMES_16) { \ | 150 if (scale_factor == TIMES_16) { \ |
148 /* double length of array. */ \ | 151 /* double length of array. */ \ |
149 __ addq(RDI, RDI); \ | 152 __ addq(RDI, RDI); \ |
150 /* only scale by 8. */ \ | 153 /* only scale by 8. */ \ |
151 scale_factor = TIMES_8; \ | 154 scale_factor = TIMES_8; \ |
152 } \ | 155 } \ |
153 const intptr_t fixed_size = sizeof(Raw##type_name) + kObjectAlignment - 1; \ | 156 const intptr_t fixed_size = sizeof(Raw##type_name) + kObjectAlignment - 1; \ |
154 __ leaq(RDI, Address(RDI, scale_factor, fixed_size)); \ | 157 __ leaq(RDI, Address(RDI, scale_factor, fixed_size)); \ |
155 __ andq(RDI, Immediate(-kObjectAlignment)); \ | 158 __ andq(RDI, Immediate(-kObjectAlignment)); \ |
156 Heap* heap = Isolate::Current()->heap(); \ | 159 Heap::Space space = Heap::SpaceForAllocation(cid); \ |
157 Heap::Space space = heap->SpaceForAllocation(cid); \ | 160 __ movq(R13, Address(THR, Thread::heap_offset())); \ |
158 __ movq(RAX, Immediate(heap->TopAddress(space))); \ | 161 __ movq(RAX, Address(R13, Heap::TopOffset(space))); \ |
159 __ movq(RAX, Address(RAX, 0)); \ | |
160 __ movq(RCX, RAX); \ | 162 __ movq(RCX, RAX); \ |
161 \ | 163 \ |
162 /* RDI: allocation size. */ \ | 164 /* RDI: allocation size. */ \ |
163 __ addq(RCX, RDI); \ | 165 __ addq(RCX, RDI); \ |
164 __ j(CARRY, &fall_through); \ | 166 __ j(CARRY, &fall_through); \ |
165 \ | 167 \ |
166 /* Check if the allocation fits into the remaining space. */ \ | 168 /* Check if the allocation fits into the remaining space. */ \ |
167 /* RAX: potential new object start. */ \ | 169 /* RAX: potential new object start. */ \ |
168 /* RCX: potential next object start. */ \ | 170 /* RCX: potential next object start. */ \ |
169 /* RDI: allocation size. */ \ | 171 /* RDI: allocation size. */ \ |
170 /* R13: scratch register. */ \ | 172 /* R13: heap. */ \ |
171 __ movq(R13, Immediate(heap->EndAddress(space))); \ | 173 __ cmpq(RCX, Address(R13, Heap::EndOffset(space))); \ |
172 __ cmpq(RCX, Address(R13, 0)); \ | |
173 __ j(ABOVE_EQUAL, &fall_through); \ | 174 __ j(ABOVE_EQUAL, &fall_through); \ |
174 \ | 175 \ |
175 /* Successfully allocated the object(s), now update top to point to */ \ | 176 /* Successfully allocated the object(s), now update top to point to */ \ |
176 /* next object start and initialize the object. */ \ | 177 /* next object start and initialize the object. */ \ |
177 __ movq(R13, Immediate(heap->TopAddress(space))); \ | 178 __ movq(Address(R13, Heap::TopOffset(space)), RCX); \ |
178 __ movq(Address(R13, 0), RCX); \ | |
179 __ addq(RAX, Immediate(kHeapObjectTag)); \ | 179 __ addq(RAX, Immediate(kHeapObjectTag)); \ |
180 __ UpdateAllocationStatsWithSize(cid, RDI, space); \ | 180 __ UpdateAllocationStatsWithSize(cid, RDI, space, \ |
| 181 /* inline_isolate = */ false); \ |
181 /* Initialize the tags. */ \ | 182 /* Initialize the tags. */ \ |
182 /* RAX: new object start as a tagged pointer. */ \ | 183 /* RAX: new object start as a tagged pointer. */ \ |
183 /* RCX: new object end address. */ \ | 184 /* RCX: new object end address. */ \ |
184 /* RDI: allocation size. */ \ | 185 /* RDI: allocation size. */ \ |
185 /* R13: scratch register. */ \ | 186 /* R13: scratch register. */ \ |
186 { \ | 187 { \ |
187 Label size_tag_overflow, done; \ | 188 Label size_tag_overflow, done; \ |
188 __ cmpq(RDI, Immediate(RawObject::SizeTag::kMaxSizeTag)); \ | 189 __ cmpq(RDI, Immediate(RawObject::SizeTag::kMaxSizeTag)); \ |
189 __ j(ABOVE, &size_tag_overflow, Assembler::kNearJump); \ | 190 __ j(ABOVE, &size_tag_overflow, Assembler::kNearJump); \ |
190 __ shlq(RDI, Immediate(RawObject::kSizeTagPos - kObjectAlignmentLog2)); \ | 191 __ shlq(RDI, Immediate(RawObject::kSizeTagPos - kObjectAlignmentLog2)); \ |
(...skipping 1124 matching lines...) |
1315 case Token::kMUL: __ mulsd(XMM0, XMM1); break; | 1316 case Token::kMUL: __ mulsd(XMM0, XMM1); break; |
1316 case Token::kDIV: __ divsd(XMM0, XMM1); break; | 1317 case Token::kDIV: __ divsd(XMM0, XMM1); break; |
1317 default: UNREACHABLE(); | 1318 default: UNREACHABLE(); |
1318 } | 1319 } |
1319 const Class& double_class = Class::Handle( | 1320 const Class& double_class = Class::Handle( |
1320 Isolate::Current()->object_store()->double_class()); | 1321 Isolate::Current()->object_store()->double_class()); |
1321 __ TryAllocate(double_class, | 1322 __ TryAllocate(double_class, |
1322 &fall_through, | 1323 &fall_through, |
1323 Assembler::kFarJump, | 1324 Assembler::kFarJump, |
1324 RAX, // Result register. | 1325 RAX, // Result register. |
1325 kNoRegister); // Pool pointer might not be loaded. | 1326 kNoRegister, // Pool pointer might not be loaded. |
| 1327 R13); // temp |
1326 __ movsd(FieldAddress(RAX, Double::value_offset()), XMM0); | 1328 __ movsd(FieldAddress(RAX, Double::value_offset()), XMM0); |
1327 __ ret(); | 1329 __ ret(); |
1328 __ Bind(&fall_through); | 1330 __ Bind(&fall_through); |
1329 } | 1331 } |
1330 | 1332 |
1331 | 1333 |
1332 void Intrinsifier::Double_add(Assembler* assembler) { | 1334 void Intrinsifier::Double_add(Assembler* assembler) { |
1333 DoubleArithmeticOperations(assembler, Token::kADD); | 1335 DoubleArithmeticOperations(assembler, Token::kADD); |
1334 } | 1336 } |
1335 | 1337 |
(...skipping 24 matching lines...) |
1360 __ cvtsi2sdq(XMM1, RAX); | 1362 __ cvtsi2sdq(XMM1, RAX); |
1361 __ movq(RAX, Address(RSP, + 2 * kWordSize)); | 1363 __ movq(RAX, Address(RSP, + 2 * kWordSize)); |
1362 __ movsd(XMM0, FieldAddress(RAX, Double::value_offset())); | 1364 __ movsd(XMM0, FieldAddress(RAX, Double::value_offset())); |
1363 __ mulsd(XMM0, XMM1); | 1365 __ mulsd(XMM0, XMM1); |
1364 const Class& double_class = Class::Handle( | 1366 const Class& double_class = Class::Handle( |
1365 Isolate::Current()->object_store()->double_class()); | 1367 Isolate::Current()->object_store()->double_class()); |
1366 __ TryAllocate(double_class, | 1368 __ TryAllocate(double_class, |
1367 &fall_through, | 1369 &fall_through, |
1368 Assembler::kFarJump, | 1370 Assembler::kFarJump, |
1369 RAX, // Result register. | 1371 RAX, // Result register. |
1370 kNoRegister); // Pool pointer might not be loaded. | 1372 kNoRegister, // Pool pointer might not be loaded. |
| 1373 R13); |
1371 __ movsd(FieldAddress(RAX, Double::value_offset()), XMM0); | 1374 __ movsd(FieldAddress(RAX, Double::value_offset()), XMM0); |
1372 __ ret(); | 1375 __ ret(); |
1373 __ Bind(&fall_through); | 1376 __ Bind(&fall_through); |
1374 } | 1377 } |
1375 | 1378 |
1376 | 1379 |
1377 // Left is double, right is integer (Bigint, Mint or Smi). | 1380 // Left is double, right is integer (Bigint, Mint or Smi). |
1378 void Intrinsifier::DoubleFromInteger(Assembler* assembler) { | 1381 void Intrinsifier::DoubleFromInteger(Assembler* assembler) { |
1379 Label fall_through; | 1382 Label fall_through; |
1380 __ movq(RAX, Address(RSP, +1 * kWordSize)); | 1383 __ movq(RAX, Address(RSP, +1 * kWordSize)); |
1381 __ testq(RAX, Immediate(kSmiTagMask)); | 1384 __ testq(RAX, Immediate(kSmiTagMask)); |
1382 __ j(NOT_ZERO, &fall_through); | 1385 __ j(NOT_ZERO, &fall_through); |
1383 // Is Smi. | 1386 // Is Smi. |
1384 __ SmiUntag(RAX); | 1387 __ SmiUntag(RAX); |
1385 __ cvtsi2sdq(XMM0, RAX); | 1388 __ cvtsi2sdq(XMM0, RAX); |
1386 const Class& double_class = Class::Handle( | 1389 const Class& double_class = Class::Handle( |
1387 Isolate::Current()->object_store()->double_class()); | 1390 Isolate::Current()->object_store()->double_class()); |
1388 __ TryAllocate(double_class, | 1391 __ TryAllocate(double_class, |
1389 &fall_through, | 1392 &fall_through, |
1390 Assembler::kFarJump, | 1393 Assembler::kFarJump, |
1391 RAX, // Result register. | 1394 RAX, // Result register. |
1392 kNoRegister); // Pool pointer might not be loaded. | 1395 kNoRegister, // Pool pointer might not be loaded. |
| 1396 R13); |
1393 __ movsd(FieldAddress(RAX, Double::value_offset()), XMM0); | 1397 __ movsd(FieldAddress(RAX, Double::value_offset()), XMM0); |
1394 __ ret(); | 1398 __ ret(); |
1395 __ Bind(&fall_through); | 1399 __ Bind(&fall_through); |
1396 } | 1400 } |
1397 | 1401 |
1398 | 1402 |
1399 void Intrinsifier::Double_getIsNaN(Assembler* assembler) { | 1403 void Intrinsifier::Double_getIsNaN(Assembler* assembler) { |
1400 Label is_true; | 1404 Label is_true; |
1401 __ movq(RAX, Address(RSP, +1 * kWordSize)); | 1405 __ movq(RAX, Address(RSP, +1 * kWordSize)); |
1402 __ movsd(XMM0, FieldAddress(RAX, Double::value_offset())); | 1406 __ movsd(XMM0, FieldAddress(RAX, Double::value_offset())); |
(...skipping 53 matching lines...) |
1456 // Argument is double and is in RAX. | 1460 // Argument is double and is in RAX. |
1457 __ movsd(XMM1, FieldAddress(RAX, Double::value_offset())); | 1461 __ movsd(XMM1, FieldAddress(RAX, Double::value_offset())); |
1458 __ Bind(&double_op); | 1462 __ Bind(&double_op); |
1459 __ sqrtsd(XMM0, XMM1); | 1463 __ sqrtsd(XMM0, XMM1); |
1460 const Class& double_class = Class::Handle( | 1464 const Class& double_class = Class::Handle( |
1461 Isolate::Current()->object_store()->double_class()); | 1465 Isolate::Current()->object_store()->double_class()); |
1462 __ TryAllocate(double_class, | 1466 __ TryAllocate(double_class, |
1463 &fall_through, | 1467 &fall_through, |
1464 Assembler::kFarJump, | 1468 Assembler::kFarJump, |
1465 RAX, // Result register. | 1469 RAX, // Result register. |
1466 kNoRegister); // Pool pointer might not be loaded. | 1470 kNoRegister, // Pool pointer might not be loaded. |
| 1471 R13); |
1467 __ movsd(FieldAddress(RAX, Double::value_offset()), XMM0); | 1472 __ movsd(FieldAddress(RAX, Double::value_offset()), XMM0); |
1468 __ ret(); | 1473 __ ret(); |
1469 __ Bind(&is_smi); | 1474 __ Bind(&is_smi); |
1470 __ SmiUntag(RAX); | 1475 __ SmiUntag(RAX); |
1471 __ cvtsi2sdq(XMM1, RAX); | 1476 __ cvtsi2sdq(XMM1, RAX); |
1472 __ jmp(&double_op); | 1477 __ jmp(&double_op); |
1473 __ Bind(&fall_through); | 1478 __ Bind(&fall_through); |
1474 } | 1479 } |
1475 | 1480 |
1476 | 1481 |
(...skipping 253 matching lines...) |
1730 } | 1735 } |
1731 | 1736 |
1732 | 1737 |
1733 // Allocates one-byte string of length 'end - start'. The content is not | 1738 // Allocates one-byte string of length 'end - start'. The content is not |
1734 // initialized. 'length_reg' contains the tagged length. | 1739 // initialized. 'length_reg' contains the tagged length. |
1735 // Returns new string as tagged pointer in RAX. | 1740 // Returns new string as tagged pointer in RAX. |
1736 static void TryAllocateOnebyteString(Assembler* assembler, | 1741 static void TryAllocateOnebyteString(Assembler* assembler, |
1737 Label* ok, | 1742 Label* ok, |
1738 Label* failure, | 1743 Label* failure, |
1739 Register length_reg) { | 1744 Register length_reg) { |
1740 __ MaybeTraceAllocation(kOneByteStringCid, failure, false); | 1745 __ MaybeTraceAllocation(kOneByteStringCid, failure, false, |
| 1746 /* inline_isolate = */ false); |
1741 if (length_reg != RDI) { | 1747 if (length_reg != RDI) { |
1742 __ movq(RDI, length_reg); | 1748 __ movq(RDI, length_reg); |
1743 } | 1749 } |
1744 Label pop_and_fail; | 1750 Label pop_and_fail; |
1745 __ pushq(RDI); // Preserve length. | 1751 __ pushq(RDI); // Preserve length. |
1746 __ SmiUntag(RDI); | 1752 __ SmiUntag(RDI); |
1747 const intptr_t fixed_size = sizeof(RawString) + kObjectAlignment - 1; | 1753 const intptr_t fixed_size = sizeof(RawString) + kObjectAlignment - 1; |
1748 __ leaq(RDI, Address(RDI, TIMES_1, fixed_size)); // RDI is a Smi. | 1754 __ leaq(RDI, Address(RDI, TIMES_1, fixed_size)); // RDI is a Smi. |
1749 __ andq(RDI, Immediate(-kObjectAlignment)); | 1755 __ andq(RDI, Immediate(-kObjectAlignment)); |
1750 | 1756 |
1751 Isolate* isolate = Isolate::Current(); | |
1752 Heap* heap = isolate->heap(); | |
1753 const intptr_t cid = kOneByteStringCid; | 1757 const intptr_t cid = kOneByteStringCid; |
1754 Heap::Space space = heap->SpaceForAllocation(cid); | 1758 Heap::Space space = Heap::SpaceForAllocation(cid); |
1755 __ movq(RAX, Immediate(heap->TopAddress(space))); | 1759 __ movq(R13, Address(THR, Thread::heap_offset())); |
1756 __ movq(RAX, Address(RAX, 0)); | 1760 __ movq(RAX, Address(R13, Heap::TopOffset(space))); |
1757 | 1761 |
1758 // RDI: allocation size. | 1762 // RDI: allocation size. |
1759 __ movq(RCX, RAX); | 1763 __ movq(RCX, RAX); |
1760 __ addq(RCX, RDI); | 1764 __ addq(RCX, RDI); |
1761 __ j(CARRY, &pop_and_fail); | 1765 __ j(CARRY, &pop_and_fail); |
1762 | 1766 |
1763 // Check if the allocation fits into the remaining space. | 1767 // Check if the allocation fits into the remaining space. |
1764 // RAX: potential new object start. | 1768 // RAX: potential new object start. |
1765 // RCX: potential next object start. | 1769 // RCX: potential next object start. |
1766 // RDI: allocation size. | 1770 // RDI: allocation size. |
1767 __ movq(R13, Immediate(heap->EndAddress(space))); | 1771 // R13: heap. |
1768 __ cmpq(RCX, Address(R13, 0)); | 1772 __ cmpq(RCX, Address(R13, Heap::EndOffset(space))); |
1769 __ j(ABOVE_EQUAL, &pop_and_fail); | 1773 __ j(ABOVE_EQUAL, &pop_and_fail); |
1770 | 1774 |
1771 // Successfully allocated the object(s), now update top to point to | 1775 // Successfully allocated the object(s), now update top to point to |
1772 // next object start and initialize the object. | 1776 // next object start and initialize the object. |
1773 __ movq(R13, Immediate(heap->TopAddress(space))); | 1777 __ movq(Address(R13, Heap::TopOffset(space)), RCX); |
1774 __ movq(Address(R13, 0), RCX); | |
1775 __ addq(RAX, Immediate(kHeapObjectTag)); | 1778 __ addq(RAX, Immediate(kHeapObjectTag)); |
1776 __ UpdateAllocationStatsWithSize(cid, RDI, space); | 1779 __ UpdateAllocationStatsWithSize(cid, RDI, space, |
| 1780 /* inline_isolate = */ false); |
1777 | 1781 |
1778 // Initialize the tags. | 1782 // Initialize the tags. |
1779 // RAX: new object start as a tagged pointer. | 1783 // RAX: new object start as a tagged pointer. |
1780 // RDI: allocation size. | 1784 // RDI: allocation size. |
1781 { | 1785 { |
1782 Label size_tag_overflow, done; | 1786 Label size_tag_overflow, done; |
1783 __ cmpq(RDI, Immediate(RawObject::SizeTag::kMaxSizeTag)); | 1787 __ cmpq(RDI, Immediate(RawObject::SizeTag::kMaxSizeTag)); |
1784 __ j(ABOVE, &size_tag_overflow, Assembler::kNearJump); | 1788 __ j(ABOVE, &size_tag_overflow, Assembler::kNearJump); |
1785 __ shlq(RDI, Immediate(RawObject::kSizeTagPos - kObjectAlignmentLog2)); | 1789 __ shlq(RDI, Immediate(RawObject::kSizeTagPos - kObjectAlignmentLog2)); |
1786 __ jmp(&done, Assembler::kNearJump); | 1790 __ jmp(&done, Assembler::kNearJump); |
(...skipping 226 matching lines...) |
2013 __ LoadIsolate(RAX); | 2017 __ LoadIsolate(RAX); |
2014 __ movq(RAX, Address(RAX, Isolate::current_tag_offset())); | 2018 __ movq(RAX, Address(RAX, Isolate::current_tag_offset())); |
2015 __ ret(); | 2019 __ ret(); |
2016 } | 2020 } |
2017 | 2021 |
2018 #undef __ | 2022 #undef __ |
2019 | 2023 |
2020 } // namespace dart | 2024 } // namespace dart |
2021 | 2025 |
2022 #endif // defined TARGET_ARCH_X64 | 2026 #endif // defined TARGET_ARCH_X64 |