OLD | NEW |
1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file | 1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file |
2 // for details. All rights reserved. Use of this source code is governed by a | 2 // for details. All rights reserved. Use of this source code is governed by a |
3 // BSD-style license that can be found in the LICENSE file. | 3 // BSD-style license that can be found in the LICENSE file. |
4 | 4 |
5 #include "vm/globals.h" // Needed here to get TARGET_ARCH_MIPS. | 5 #include "vm/globals.h" // Needed here to get TARGET_ARCH_MIPS. |
6 #if defined(TARGET_ARCH_MIPS) | 6 #if defined(TARGET_ARCH_MIPS) |
7 | 7 |
8 #include "vm/intrinsifier.h" | 8 #include "vm/intrinsifier.h" |
9 | 9 |
10 #include "vm/assembler.h" | 10 #include "vm/assembler.h" |
(...skipping 53 matching lines...) |
64 // T1: potential next object start. | 64 // T1: potential next object start. |
65 // T2: allocation size. | 65 // T2: allocation size. |
66 __ LoadImmediate(T4, heap->TopAddress()); | 66 __ LoadImmediate(T4, heap->TopAddress()); |
67 __ lw(T4, Address(T4, 0)); | 67 __ lw(T4, Address(T4, 0)); |
68 __ BranchUnsignedGreaterEqual(T1, T4, &fall_through); | 68 __ BranchUnsignedGreaterEqual(T1, T4, &fall_through); |
69 | 69 |
70 // Successfully allocated the object(s), now update top to point to | 70 // Successfully allocated the object(s), now update top to point to |
71 // next object start and initialize the object. | 71 // next object start and initialize the object. |
72 __ sw(T1, Address(T3, 0)); | 72 __ sw(T1, Address(T3, 0)); |
73 __ addiu(T0, T0, Immediate(kHeapObjectTag)); | 73 __ addiu(T0, T0, Immediate(kHeapObjectTag)); |
| 74 __ BumpAllocationCount(Heap::kNew, kArrayCid, T2, T4); |
74 | 75 |
75 // Initialize the tags. | 76 // Initialize the tags. |
76 // T0: new object start as a tagged pointer. | 77 // T0: new object start as a tagged pointer. |
77 // T1: new object end address. | 78 // T1: new object end address. |
78 // T2: allocation size. | 79 // T2: allocation size. |
79 { | 80 { |
80 Label overflow, done; | 81 Label overflow, done; |
81 const intptr_t shift = RawObject::kSizeTagBit - kObjectAlignmentLog2; | 82 const intptr_t shift = RawObject::kSizeTagBit - kObjectAlignmentLog2; |
82 const Class& cls = Class::Handle(isolate->object_store()->array_class()); | 83 const Class& cls = Class::Handle(isolate->object_store()->array_class()); |
83 | 84 |
(...skipping 35 matching lines...) |
119 __ AddImmediate(T2, T0, sizeof(RawArray) - kHeapObjectTag); | 120 __ AddImmediate(T2, T0, sizeof(RawArray) - kHeapObjectTag); |
120 | 121 |
121 Label done; | 122 Label done; |
122 Label init_loop; | 123 Label init_loop; |
123 __ Bind(&init_loop); | 124 __ Bind(&init_loop); |
124 __ BranchUnsignedGreaterEqual(T2, T1, &done); | 125 __ BranchUnsignedGreaterEqual(T2, T1, &done); |
125 __ sw(T7, Address(T2, 0)); | 126 __ sw(T7, Address(T2, 0)); |
126 __ b(&init_loop); | 127 __ b(&init_loop); |
127 __ delay_slot()->addiu(T2, T2, Immediate(kWordSize)); | 128 __ delay_slot()->addiu(T2, T2, Immediate(kWordSize)); |
128 __ Bind(&done); | 129 __ Bind(&done); |
129 | |
130 __ Ret(); // Returns the newly allocated object in V0. | 130 __ Ret(); // Returns the newly allocated object in V0. |
131 __ delay_slot()->mov(V0, T0); | 131 __ delay_slot()->mov(V0, T0); |
132 __ Bind(&fall_through); | 132 __ Bind(&fall_through); |
133 } | 133 } |
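The hunk above is typical of this change: once the bump-pointer fast path has succeeded (top has been updated and the result pointer tagged), the intrinsic now calls BumpAllocationCount, presumably so that per-class allocation statistics stay accurate even when allocation bypasses the runtime. As a rough, standalone illustration of the bookkeeping this implies (a minimal sketch only; the ClassHeapStats layout and field names below are assumptions, not the VM's actual code):

    #include <cstdio>
    #include <cstdint>

    // Hypothetical per-class allocation statistics, indexed by class id (cid).
    struct ClassHeapStats {
      int64_t new_count = 0;  // Objects allocated in new-space.
      int64_t new_size = 0;   // Bytes allocated in new-space.
    };

    static ClassHeapStats class_stats[1024];  // One slot per class id.

    // Conceptual equivalent of what a BumpAllocationCount-style helper records
    // after a successful inline allocation of 'size' bytes for class 'cid'.
    void BumpAllocationCount(int cid, int64_t size) {
      class_stats[cid].new_count += 1;
      class_stats[cid].new_size += size;
    }

    int main() {
      const int kExampleCid = 42;  // Placeholder class id for the example.
      BumpAllocationCount(kExampleCid, 64);
      BumpAllocationCount(kExampleCid, 128);
      std::printf("count=%lld size=%lld\n",
                  (long long)class_stats[kExampleCid].new_count,
                  (long long)class_stats[kExampleCid].new_size);
      return 0;
    }

In the generated MIPS code the same update has to be done with explicit loads and stores, which appears to be why the call sites pass the allocation size (T2) and a scratch register (T4) to the macro.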
134 | 134 |
135 | 135 |
136 void Intrinsifier::Array_getLength(Assembler* assembler) { | 136 void Intrinsifier::Array_getLength(Assembler* assembler) { |
137 __ lw(V0, Address(SP, 0 * kWordSize)); | 137 __ lw(V0, Address(SP, 0 * kWordSize)); |
138 __ Ret(); | 138 __ Ret(); |
139 __ delay_slot()->lw(V0, FieldAddress(V0, Array::length_offset())); | 139 __ delay_slot()->lw(V0, FieldAddress(V0, Array::length_offset())); |
(...skipping 159 matching lines...) |
299 FieldAddress(V0, GrowableObjectArray::data_offset()), | 299 FieldAddress(V0, GrowableObjectArray::data_offset()), |
300 T1); | 300 T1); |
301 | 301 |
302 // V0: new growable array object start as a tagged pointer. | 302 // V0: new growable array object start as a tagged pointer. |
303 // Store the type argument field in the growable array object. | 303 // Store the type argument field in the growable array object. |
304 __ lw(T1, Address(SP, kTypeArgumentsOffset)); // Type argument. | 304 __ lw(T1, Address(SP, kTypeArgumentsOffset)); // Type argument. |
305 __ StoreIntoObjectNoBarrier( | 305 __ StoreIntoObjectNoBarrier( |
306 V0, | 306 V0, |
307 FieldAddress(V0, GrowableObjectArray::type_arguments_offset()), | 307 FieldAddress(V0, GrowableObjectArray::type_arguments_offset()), |
308 T1); | 308 T1); |
309 | 309 __ BumpAllocationCount(Heap::kNew, kGrowableObjectArrayCid, T1); |
310 // Set the length field in the growable array object to 0. | 310 // Set the length field in the growable array object to 0. |
311 __ Ret(); // Returns the newly allocated object in V0. | 311 __ Ret(); // Returns the newly allocated object in V0. |
312 __ delay_slot()->sw(ZR, | 312 __ delay_slot()->sw(ZR, |
313 FieldAddress(V0, GrowableObjectArray::length_offset())); | 313 FieldAddress(V0, GrowableObjectArray::length_offset())); |
314 | 314 |
315 __ Bind(&fall_through); | 315 __ Bind(&fall_through); |
316 } | 316 } |
317 | 317 |
318 | 318 |
319 void Intrinsifier::GrowableList_getLength(Assembler* assembler) { | 319 void Intrinsifier::GrowableList_getLength(Assembler* assembler) { |
(...skipping 171 matching lines...) |
491 /* T2: allocation size. */ \ | 491 /* T2: allocation size. */ \ |
492 __ LoadImmediate(T3, heap->EndAddress()); \ | 492 __ LoadImmediate(T3, heap->EndAddress()); \ |
493 __ lw(T3, Address(T3, 0)); \ | 493 __ lw(T3, Address(T3, 0)); \ |
494 __ BranchUnsignedGreaterEqual(T1, T3, &fall_through); \ | 494 __ BranchUnsignedGreaterEqual(T1, T3, &fall_through); \ |
495 \ | 495 \ |
496 /* Successfully allocated the object(s), now update top to point to */ \ | 496 /* Successfully allocated the object(s), now update top to point to */ \ |
497 /* next object start and initialize the object. */ \ | 497 /* next object start and initialize the object. */ \ |
498 __ LoadImmediate(T3, heap->TopAddress()); \ | 498 __ LoadImmediate(T3, heap->TopAddress()); \ |
499 __ sw(T1, Address(T3, 0)); \ | 499 __ sw(T1, Address(T3, 0)); \ |
500 __ AddImmediate(V0, kHeapObjectTag); \ | 500 __ AddImmediate(V0, kHeapObjectTag); \ |
501 \ | 501 __ BumpAllocationCount(Heap::kNew, cid, T2, T4); \ |
502 /* Initialize the tags. */ \ | 502 /* Initialize the tags. */ \ |
503 /* V0: new object start as a tagged pointer. */ \ | 503 /* V0: new object start as a tagged pointer. */ \ |
504 /* T1: new object end address. */ \ | 504 /* T1: new object end address. */ \ |
505 /* T2: allocation size. */ \ | 505 /* T2: allocation size. */ \ |
506 { \ | 506 { \ |
507 Label size_tag_overflow, done; \ | 507 Label size_tag_overflow, done; \ |
508 __ BranchUnsignedGreater(T2, RawObject::SizeTag::kMaxSizeTag, \ | 508 __ BranchUnsignedGreater(T2, RawObject::SizeTag::kMaxSizeTag, \ |
509 &size_tag_overflow); \ | 509 &size_tag_overflow); \ |
510 __ b(&done); \ | 510 __ b(&done); \ |
511 __ delay_slot()->sll(T2, T2, \ | 511 __ delay_slot()->sll(T2, T2, \ |
(...skipping 21 matching lines...) |
533 /* T2: iterator which initially points to the start of the variable */ \ | 533 /* T2: iterator which initially points to the start of the variable */ \ |
534 /* data area to be initialized. */ \ | 534 /* data area to be initialized. */ \ |
535 __ AddImmediate(T2, V0, sizeof(Raw##type_name) - 1); \ | 535 __ AddImmediate(T2, V0, sizeof(Raw##type_name) - 1); \ |
536 Label done, init_loop; \ | 536 Label done, init_loop; \ |
537 __ Bind(&init_loop); \ | 537 __ Bind(&init_loop); \ |
538 __ BranchUnsignedGreaterEqual(T2, T1, &done); \ | 538 __ BranchUnsignedGreaterEqual(T2, T1, &done); \ |
539 __ sw(ZR, Address(T2, 0)); \ | 539 __ sw(ZR, Address(T2, 0)); \ |
540 __ b(&init_loop); \ | 540 __ b(&init_loop); \ |
541 __ delay_slot()->addiu(T2, T2, Immediate(kWordSize)); \ | 541 __ delay_slot()->addiu(T2, T2, Immediate(kWordSize)); \ |
542 __ Bind(&done); \ | 542 __ Bind(&done); \ |
543 \ | |
544 __ Ret(); \ | 543 __ Ret(); \ |
545 __ Bind(&fall_through); \ | 544 __ Bind(&fall_through); \ |
546 | 545 |
547 | 546 |
548 // Gets the length of a TypedData. | 547 // Gets the length of a TypedData. |
549 void Intrinsifier::TypedData_getLength(Assembler* assembler) { | 548 void Intrinsifier::TypedData_getLength(Assembler* assembler) { |
550 __ lw(T0, Address(SP, 0 * kWordSize)); | 549 __ lw(T0, Address(SP, 0 * kWordSize)); |
551 __ Ret(); | 550 __ Ret(); |
552 __ delay_slot()->lw(V0, FieldAddress(T0, TypedData::length_offset())); | 551 __ delay_slot()->lw(V0, FieldAddress(T0, TypedData::length_offset())); |
553 } | 552 } |
(...skipping 293 matching lines...) |
847 __ subu(T4, ZR, T0); // T4 <- -T0 | 846 __ subu(T4, ZR, T0); // T4 <- -T0 |
848 __ addiu(T4, T4, Immediate(32)); // T4 <- 32 - T0 | 847 __ addiu(T4, T4, Immediate(32)); // T4 <- 32 - T0 |
849 __ sllv(T3, T3, T4); // T3 <- T3 << T4 | 848 __ sllv(T3, T3, T4); // T3 <- T3 << T4 |
850 __ and_(T3, T3, T1); // T3 <- T3 & T1 | 849 __ and_(T3, T3, T1); // T3 <- T3 & T1 |
851 __ srlv(T3, T3, T4); // T3 <- T3 >> T4 | 850 __ srlv(T3, T3, T4); // T3 <- T3 >> T4 |
852 // Now T3 has the bits that fall off of T1 on a left shift. | 851 // Now T3 has the bits that fall off of T1 on a left shift. |
853 __ sllv(T0, T1, T0); // T0 gets low bits. | 852 __ sllv(T0, T1, T0); // T0 gets low bits. |
854 | 853 |
855 const Class& mint_class = Class::Handle( | 854 const Class& mint_class = Class::Handle( |
856 Isolate::Current()->object_store()->mint_class()); | 855 Isolate::Current()->object_store()->mint_class()); |
857 __ TryAllocate(mint_class, &fall_through, V0); | 856 __ TryAllocate(mint_class, &fall_through, V0, T1); |
858 | 857 |
859 __ sw(T0, FieldAddress(V0, Mint::value_offset())); | 858 __ sw(T0, FieldAddress(V0, Mint::value_offset())); |
860 __ Ret(); | 859 __ Ret(); |
861 __ delay_slot()->sw(T3, FieldAddress(V0, Mint::value_offset() + kWordSize)); | 860 __ delay_slot()->sw(T3, FieldAddress(V0, Mint::value_offset() + kWordSize)); |
862 __ Bind(&fall_through); | 861 __ Bind(&fall_through); |
863 } | 862 } |
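The other recurring change in this file is visible at the TryAllocate call sites: each now passes an extra register (T1 above) in addition to the result register, presumably a scratch register the macro needs for the same allocation-statistics update. A minimal sketch of the call-shape change, with hypothetical stand-ins for the VM types (this is not the real macro, just the signature pattern the new call sites imply):

    #include <cstdio>

    struct Class {};                 // Stand-in for the VM's Class handle.
    struct Label {};                 // Stand-in for an assembler label.
    enum Register { V0, T1 };        // Stand-in register names.

    // Assumed updated shape: result register plus a caller-provided temp.
    void TryAllocate(const Class& cls, Label* failure,
                     Register instance_reg, Register temp_reg) {
      (void)cls; (void)failure; (void)instance_reg; (void)temp_reg;
      std::puts("inline allocation fast path (sketch)");
    }

    int main() {
      Class mint_class;
      Label fall_through;
      // Mirrors the new call sites such as
      //   __ TryAllocate(mint_class, &fall_through, V0, T1);
      TryAllocate(mint_class, &fall_through, V0, T1);
      return 0;
    }

Every TryAllocate call in this file (the Mint case above and the Double cases below) picks up the same extra argument; the failure label and result register are unchanged.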
864 | 863 |
865 | 864 |
866 static void Get64SmiOrMint(Assembler* assembler, | 865 static void Get64SmiOrMint(Assembler* assembler, |
867 Register res_hi, | 866 Register res_hi, |
(...skipping 319 matching lines...) |
1187 __ lwc1(F1, FieldAddress(T0, Double::value_offset() + kWordSize)); | 1186 __ lwc1(F1, FieldAddress(T0, Double::value_offset() + kWordSize)); |
1188 switch (kind) { | 1187 switch (kind) { |
1189 case Token::kADD: __ addd(D0, D0, D1); break; | 1188 case Token::kADD: __ addd(D0, D0, D1); break; |
1190 case Token::kSUB: __ subd(D0, D0, D1); break; | 1189 case Token::kSUB: __ subd(D0, D0, D1); break; |
1191 case Token::kMUL: __ muld(D0, D0, D1); break; | 1190 case Token::kMUL: __ muld(D0, D0, D1); break; |
1192 case Token::kDIV: __ divd(D0, D0, D1); break; | 1191 case Token::kDIV: __ divd(D0, D0, D1); break; |
1193 default: UNREACHABLE(); | 1192 default: UNREACHABLE(); |
1194 } | 1193 } |
1195 const Class& double_class = Class::Handle( | 1194 const Class& double_class = Class::Handle( |
1196 Isolate::Current()->object_store()->double_class()); | 1195 Isolate::Current()->object_store()->double_class()); |
1197 __ TryAllocate(double_class, &fall_through, V0); // Result register. | 1196 __ TryAllocate(double_class, &fall_through, V0, T1); // Result register. |
1198 __ swc1(F0, FieldAddress(V0, Double::value_offset())); | 1197 __ swc1(F0, FieldAddress(V0, Double::value_offset())); |
1199 __ Ret(); | 1198 __ Ret(); |
1200 __ delay_slot()->swc1(F1, | 1199 __ delay_slot()->swc1(F1, |
1201 FieldAddress(V0, Double::value_offset() + kWordSize)); | 1200 FieldAddress(V0, Double::value_offset() + kWordSize)); |
1202 __ Bind(&fall_through); | 1201 __ Bind(&fall_through); |
1203 } | 1202 } |
1204 | 1203 |
1205 | 1204 |
1206 void Intrinsifier::Double_add(Assembler* assembler) { | 1205 void Intrinsifier::Double_add(Assembler* assembler) { |
1207 return DoubleArithmeticOperations(assembler, Token::kADD); | 1206 return DoubleArithmeticOperations(assembler, Token::kADD); |
(...skipping 27 matching lines...) Expand all Loading... |
1235 __ SmiUntag(T0); | 1234 __ SmiUntag(T0); |
1236 __ mtc1(T0, F4); | 1235 __ mtc1(T0, F4); |
1237 __ cvtdw(D1, F4); | 1236 __ cvtdw(D1, F4); |
1238 | 1237 |
1239 __ lw(T0, Address(SP, 1 * kWordSize)); | 1238 __ lw(T0, Address(SP, 1 * kWordSize)); |
1240 __ lwc1(F0, FieldAddress(T0, Double::value_offset())); | 1239 __ lwc1(F0, FieldAddress(T0, Double::value_offset())); |
1241 __ lwc1(F1, FieldAddress(T0, Double::value_offset() + kWordSize)); | 1240 __ lwc1(F1, FieldAddress(T0, Double::value_offset() + kWordSize)); |
1242 __ muld(D0, D0, D1); | 1241 __ muld(D0, D0, D1); |
1243 const Class& double_class = Class::Handle( | 1242 const Class& double_class = Class::Handle( |
1244 Isolate::Current()->object_store()->double_class()); | 1243 Isolate::Current()->object_store()->double_class()); |
1245 __ TryAllocate(double_class, &fall_through, V0); // Result register. | 1244 __ TryAllocate(double_class, &fall_through, V0, T1); // Result register. |
1246 __ swc1(F0, FieldAddress(V0, Double::value_offset())); | 1245 __ swc1(F0, FieldAddress(V0, Double::value_offset())); |
1247 __ Ret(); | 1246 __ Ret(); |
1248 __ delay_slot()->swc1(F1, | 1247 __ delay_slot()->swc1(F1, |
1249 FieldAddress(V0, Double::value_offset() + kWordSize)); | 1248 FieldAddress(V0, Double::value_offset() + kWordSize)); |
1250 __ Bind(&fall_through); | 1249 __ Bind(&fall_through); |
1251 } | 1250 } |
1252 | 1251 |
1253 | 1252 |
1254 void Intrinsifier::Double_fromInteger(Assembler* assembler) { | 1253 void Intrinsifier::Double_fromInteger(Assembler* assembler) { |
1255 Label fall_through; | 1254 Label fall_through; |
1256 | 1255 |
1257 __ lw(T0, Address(SP, 0 * kWordSize)); | 1256 __ lw(T0, Address(SP, 0 * kWordSize)); |
1258 __ andi(CMPRES1, T0, Immediate(kSmiTagMask)); | 1257 __ andi(CMPRES1, T0, Immediate(kSmiTagMask)); |
1259 __ bne(T0, ZR, &fall_through); | 1258 __ bne(T0, ZR, &fall_through); |
1260 | 1259 |
1261 // Is Smi. | 1260 // Is Smi. |
1262 __ SmiUntag(T0); | 1261 __ SmiUntag(T0); |
1263 __ mtc1(T0, F4); | 1262 __ mtc1(T0, F4); |
1264 __ cvtdw(D0, F4); | 1263 __ cvtdw(D0, F4); |
1265 const Class& double_class = Class::Handle( | 1264 const Class& double_class = Class::Handle( |
1266 Isolate::Current()->object_store()->double_class()); | 1265 Isolate::Current()->object_store()->double_class()); |
1267 __ TryAllocate(double_class, &fall_through, V0); // Result register. | 1266 __ TryAllocate(double_class, &fall_through, V0, T1); // Result register. |
1268 __ swc1(F0, FieldAddress(V0, Double::value_offset())); | 1267 __ swc1(F0, FieldAddress(V0, Double::value_offset())); |
1269 __ Ret(); | 1268 __ Ret(); |
1270 __ delay_slot()->swc1(F1, | 1269 __ delay_slot()->swc1(F1, |
1271 FieldAddress(V0, Double::value_offset() + kWordSize)); | 1270 FieldAddress(V0, Double::value_offset() + kWordSize)); |
1272 __ Bind(&fall_through); | 1271 __ Bind(&fall_through); |
1273 } | 1272 } |
1274 | 1273 |
1275 | 1274 |
1276 void Intrinsifier::Double_getIsNaN(Assembler* assembler) { | 1275 void Intrinsifier::Double_getIsNaN(Assembler* assembler) { |
1277 Label is_true; | 1276 Label is_true; |
(...skipping 65 matching lines...) |
1343 | 1342 |
1344 void Intrinsifier::Math_sqrt(Assembler* assembler) { | 1343 void Intrinsifier::Math_sqrt(Assembler* assembler) { |
1345 Label fall_through, is_smi, double_op; | 1344 Label fall_through, is_smi, double_op; |
1346 TestLastArgumentIsDouble(assembler, &is_smi, &fall_through); | 1345 TestLastArgumentIsDouble(assembler, &is_smi, &fall_through); |
1347 // Argument is double and is in T0. | 1346 // Argument is double and is in T0. |
1348 __ LoadDFromOffset(D1, T0, Double::value_offset() - kHeapObjectTag); | 1347 __ LoadDFromOffset(D1, T0, Double::value_offset() - kHeapObjectTag); |
1349 __ Bind(&double_op); | 1348 __ Bind(&double_op); |
1350 __ sqrtd(D0, D1); | 1349 __ sqrtd(D0, D1); |
1351 const Class& double_class = Class::Handle( | 1350 const Class& double_class = Class::Handle( |
1352 Isolate::Current()->object_store()->double_class()); | 1351 Isolate::Current()->object_store()->double_class()); |
1353 __ TryAllocate(double_class, &fall_through, V0); // Result register. | 1352 __ TryAllocate(double_class, &fall_through, V0, T1); // Result register. |
1354 __ swc1(F0, FieldAddress(V0, Double::value_offset())); | 1353 __ swc1(F0, FieldAddress(V0, Double::value_offset())); |
1355 __ Ret(); | 1354 __ Ret(); |
1356 __ delay_slot()->swc1(F1, | 1355 __ delay_slot()->swc1(F1, |
1357 FieldAddress(V0, Double::value_offset() + kWordSize)); | 1356 FieldAddress(V0, Double::value_offset() + kWordSize)); |
1358 | 1357 |
1359 __ Bind(&is_smi); | 1358 __ Bind(&is_smi); |
1360 __ SmiUntag(T0); | 1359 __ SmiUntag(T0); |
1361 __ mtc1(T0, F2); | 1360 __ mtc1(T0, F2); |
1362 __ b(&double_op); | 1361 __ b(&double_op); |
1363 __ delay_slot()->cvtdw(D1, F2); | 1362 __ delay_slot()->cvtdw(D1, F2); |
(...skipping 236 matching lines...) |
1600 // T3: heap->TopAddress(). | 1599 // T3: heap->TopAddress(). |
1601 __ LoadImmediate(T4, heap->EndAddress()); | 1600 __ LoadImmediate(T4, heap->EndAddress()); |
1602 __ lw(T4, Address(T4, 0)); | 1601 __ lw(T4, Address(T4, 0)); |
1603 __ BranchUnsignedGreaterEqual(T1, T4, failure); | 1602 __ BranchUnsignedGreaterEqual(T1, T4, failure); |
1604 | 1603 |
1605 // Successfully allocated the object(s), now update top to point to | 1604 // Successfully allocated the object(s), now update top to point to |
1606 // next object start and initialize the object. | 1605 // next object start and initialize the object. |
1607 __ sw(T1, Address(T3, 0)); | 1606 __ sw(T1, Address(T3, 0)); |
1608 __ AddImmediate(V0, kHeapObjectTag); | 1607 __ AddImmediate(V0, kHeapObjectTag); |
1609 | 1608 |
| 1609 __ BumpAllocationCount(Heap::kNew, kOneByteStringCid, T2, T3); |
| 1610 |
1610 // Initialize the tags. | 1611 // Initialize the tags. |
1611 // V0: new object start as a tagged pointer. | 1612 // V0: new object start as a tagged pointer. |
1612 // T1: new object end address. | 1613 // T1: new object end address. |
1613 // T2: allocation size. | 1614 // T2: allocation size. |
1614 { | 1615 { |
1615 Label overflow, done; | 1616 Label overflow, done; |
1616 const intptr_t shift = RawObject::kSizeTagBit - kObjectAlignmentLog2; | 1617 const intptr_t shift = RawObject::kSizeTagBit - kObjectAlignmentLog2; |
1617 const Class& cls = | 1618 const Class& cls = |
1618 Class::Handle(isolate->object_store()->one_byte_string_class()); | 1619 Class::Handle(isolate->object_store()->one_byte_string_class()); |
1619 | 1620 |
(...skipping 163 matching lines...) |
1783 } | 1784 } |
1784 | 1785 |
1785 | 1786 |
1786 void Intrinsifier::TwoByteString_equality(Assembler* assembler) { | 1787 void Intrinsifier::TwoByteString_equality(Assembler* assembler) { |
1787 StringEquality(assembler, kTwoByteStringCid); | 1788 StringEquality(assembler, kTwoByteStringCid); |
1788 } | 1789 } |
1789 | 1790 |
1790 } // namespace dart | 1791 } // namespace dart |
1791 | 1792 |
1792 #endif // defined TARGET_ARCH_MIPS | 1793 #endif // defined TARGET_ARCH_MIPS |