Chromium Code Reviews

Unified Diff: runtime/vm/flow_graph_compiler_ia32.cc

Issue 11956004: Fix vm code base so that it can be built for --arch=simarm (no snapshot yet). (Closed) Base URL: http://dart.googlecode.com/svn/branches/bleeding_edge/dart/
Patch Set: Created 7 years, 11 months ago
-// Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file
+// Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
 // for details. All rights reserved. Use of this source code is governed by a
 // BSD-style license that can be found in the LICENSE file.

 #include "vm/globals.h"  // Needed here to get TARGET_ARCH_IA32.
 #if defined(TARGET_ARCH_IA32)

 #include "vm/flow_graph_compiler.h"

 #include "lib/error.h"
 #include "vm/ast_printer.h"
(...skipping 1272 matching lines...)
   __ Drop(1);
   __ jmp(skip_call);
   __ Bind(&is_false);
   __ LoadObject(result, Bool::False());
   __ Drop(1);
   __ jmp(skip_call);
   __ Bind(&fall_through);
 }


-void FlowGraphCompiler::LoadDoubleOrSmiToXmm(XmmRegister result,
+void FlowGraphCompiler::LoadDoubleOrSmiToFpu(XmmRegister result,
Ivan Posva 2013/01/16 01:17:56 XmmRegister -> FpuRegister to match the header.
regis 2013/01/16 01:55:07 Done.
                                              Register reg,
                                              Register temp,
                                              Label* not_double_or_smi) {
   Label is_smi, done;
   __ testl(reg, Immediate(kSmiTagMask));
   __ j(ZERO, &is_smi);
   __ CompareClassId(reg, kDoubleCid, temp);
   __ j(NOT_EQUAL, not_double_or_smi);
   __ movsd(result, FieldAddress(reg, Double::value_offset()));
   __ jmp(&done);
   __ Bind(&is_smi);
   __ movl(temp, reg);
   __ SmiUntag(temp);
   __ cvtsi2sd(result, temp);
   __ Bind(&done);
 }

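The review exchange above asks for XmmRegister -> FpuRegister to match the header. On ia32 the architecture-neutral FPU register type can simply alias the XMM register set, so the renamed helpers still emit the same SSE2 instructions; a minimal sketch of what such a declaration might look like (assumed for illustration only, the constants header itself is not shown in this CL):

// Assumed sketch of an ia32 constants header; names below are illustrative.
enum XmmRegister {
  XMM0 = 0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
  kNumberOfXmmRegisters = 8,
};

// The generic FPU register type is just the XMM enum on ia32, so calls such
// as ContainsFpuRegister() and fpu_regs_count() keep operating on XMM state;
// only the names become architecture-neutral so the same compiler code can be
// shared with the simarm port.
typedef XmmRegister FpuRegister;
const int kNumberOfFpuRegisters = kNumberOfXmmRegisters;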
 void FlowGraphCompiler::SaveLiveRegisters(LocationSummary* locs) {
   // TODO(vegorov): consider saving only caller save (volatile) registers.
-  const intptr_t xmm_regs_count = locs->live_registers()->xmm_regs_count();
+  const intptr_t xmm_regs_count = locs->live_registers()->fpu_regs_count();
   if (xmm_regs_count > 0) {
     __ subl(ESP, Immediate(xmm_regs_count * kDoubleSize));
     // Store XMM registers with the lowest register number at the lowest
     // address.
     intptr_t offset = 0;
     for (intptr_t reg_idx = 0; reg_idx < kNumberOfXmmRegisters; ++reg_idx) {
       XmmRegister xmm_reg = static_cast<XmmRegister>(reg_idx);
-      if (locs->live_registers()->ContainsXmmRegister(xmm_reg)) {
+      if (locs->live_registers()->ContainsFpuRegister(xmm_reg)) {
         __ movsd(Address(ESP, offset), xmm_reg);
         offset += kDoubleSize;
       }
     }
     ASSERT(offset == (xmm_regs_count * kDoubleSize));
   }

   // Store general purpose registers with the highest register number at the
   // lowest address.
   for (intptr_t reg_idx = 0; reg_idx < kNumberOfCpuRegisters; ++reg_idx) {
     Register reg = static_cast<Register>(reg_idx);
     if (locs->live_registers()->ContainsRegister(reg)) {
       __ pushl(reg);
     }
   }
 }


 void FlowGraphCompiler::RestoreLiveRegisters(LocationSummary* locs) {
   // General purpose registers have the highest register number at the
   // lowest address.
   for (intptr_t reg_idx = kNumberOfCpuRegisters - 1; reg_idx >= 0; --reg_idx) {
     Register reg = static_cast<Register>(reg_idx);
     if (locs->live_registers()->ContainsRegister(reg)) {
       __ popl(reg);
     }
   }

-  const intptr_t xmm_regs_count = locs->live_registers()->xmm_regs_count();
+  const intptr_t xmm_regs_count = locs->live_registers()->fpu_regs_count();
   if (xmm_regs_count > 0) {
     // XMM registers have the lowest register number at the lowest address.
     intptr_t offset = 0;
     for (intptr_t reg_idx = 0; reg_idx < kNumberOfXmmRegisters; ++reg_idx) {
       XmmRegister xmm_reg = static_cast<XmmRegister>(reg_idx);
-      if (locs->live_registers()->ContainsXmmRegister(xmm_reg)) {
+      if (locs->live_registers()->ContainsFpuRegister(xmm_reg)) {
         __ movsd(xmm_reg, Address(ESP, offset));
         offset += kDoubleSize;
       }
     }
     ASSERT(offset == (xmm_regs_count * kDoubleSize));
     __ addl(ESP, Immediate(offset));
   }
 }

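To make the save/restore layout above concrete, a worked example (illustrative only, not part of the CL) for a hypothetical live set of {EAX, EDX} and {XMM1, XMM3}, with kWordSize == 4 and kDoubleSize == 8 on ia32:

// Illustrative only.  SaveLiveRegisters first reserves 2 * kDoubleSize bytes
// and stores the live FPU registers lowest-number-first, then pushes the live
// CPU registers in increasing index order, so the highest-numbered CPU
// register ends up at the lowest address:
//
//   [ESP +  0]  EDX              // popped first by RestoreLiveRegisters
//   [ESP +  4]  EAX
//   [ESP +  8]  XMM1 (8 bytes)   // movsd slots, lowest register number first
//   [ESP + 16]  XMM3 (8 bytes)
//
// RestoreLiveRegisters walks the CPU registers from the highest index down,
// popping them in the matching order, then reloads the FPU slots and adds the
// reserved bytes back to ESP.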
+struct CidTarget {
+  intptr_t cid;
+  Function* target;
+  intptr_t count;
+  CidTarget(intptr_t cid_arg,
+            Function* target_arg,
+            intptr_t count_arg)
+      : cid(cid_arg), target(target_arg), count(count_arg) {}
+};
+
+
+// Returns 'sorted' array in decreasing count order.
+// The expected number of elements to sort is less than 10.
+static void SortICDataByCount(const ICData& ic_data,
+                              GrowableArray<CidTarget>* sorted) {
+  ASSERT(ic_data.num_args_tested() == 1);
+  const intptr_t len = ic_data.NumberOfChecks();
+  sorted->Clear();
+
+  for (int i = 0; i < len; i++) {
+    sorted->Add(CidTarget(ic_data.GetReceiverClassIdAt(i),
+                          &Function::ZoneHandle(ic_data.GetTargetAt(i)),
+                          ic_data.GetCountAt(i)));
+  }
+  for (int i = 0; i < len; i++) {
+    intptr_t largest_ix = i;
+    for (int k = i + 1; k < len; k++) {
+      if ((*sorted)[largest_ix].count < (*sorted)[k].count) {
+        largest_ix = k;
+      }
+    }
+    if (i != largest_ix) {
+      // Swap.
+      CidTarget temp = (*sorted)[i];
+      (*sorted)[i] = (*sorted)[largest_ix];
+      (*sorted)[largest_ix] = temp;
+    }
+  }
+}
+
+
+void FlowGraphCompiler::EmitTestAndCall(const ICData& ic_data,
+                                        Register class_id_reg,
+                                        intptr_t arg_count,
+                                        const Array& arg_names,
+                                        Label* deopt,
+                                        intptr_t deopt_id,
+                                        intptr_t token_index,
+                                        LocationSummary* locs) {
+  ASSERT(!ic_data.IsNull() && (ic_data.NumberOfChecks() > 0));
+  Label match_found;
+  const intptr_t len = ic_data.NumberOfChecks();
+  GrowableArray<CidTarget> sorted(len);
+  SortICDataByCount(ic_data, &sorted);
+  for (intptr_t i = 0; i < len; i++) {
+    const bool is_last_check = (i == (len - 1));
+    Label next_test;
+    assembler()->cmpl(class_id_reg, Immediate(sorted[i].cid));
+    if (is_last_check) {
+      assembler()->j(NOT_EQUAL, deopt);
+    } else {
+      assembler()->j(NOT_EQUAL, &next_test);
+    }
+    GenerateStaticCall(deopt_id,
+                       token_index,
+                       *sorted[i].target,
+                       arg_count,
+                       arg_names,
+                       locs);
+    if (!is_last_check) {
+      assembler()->jmp(&match_found);
+    }
+    assembler()->Bind(&next_test);
+  }
+  assembler()->Bind(&match_found);
+}
+
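For reference, a sketch of the dispatch sequence EmitTestAndCall produces for an ICData with two receiver classes (illustrative pseudo-listing; GenerateStaticCall expands to a full static call sequence, and the cids shown are placeholders):

//   cmpl class_id_reg, Immediate(<most frequent cid>)   // sorted first
//   jne  next_test
//   call <static target for that cid>
//   jmp  match_found
// next_test:
//   cmpl class_id_reg, Immediate(<remaining cid>)
//   jne  deopt            // last check: any other class id deoptimizes
//   call <static target for the remaining cid>
// match_found: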
+
+void FlowGraphCompiler::EmitDoubleCompareBranch(Condition true_condition,
+                                                FpuRegister left,
+                                                FpuRegister right,
+                                                BranchInstr* branch) {
+  ASSERT(branch != NULL);
+  assembler()->comisd(left, right);
+  BlockEntryInstr* nan_result = (true_condition == NOT_EQUAL) ?
+      branch->true_successor() : branch->false_successor();
+  assembler()->j(PARITY_EVEN, GetBlockLabel(nan_result));
+  branch->EmitBranchOnCondition(this, true_condition);
+}
+
+
+
+void FlowGraphCompiler::EmitDoubleCompareBool(Condition true_condition,
+                                              FpuRegister left,
+                                              FpuRegister right,
+                                              Register result) {
+  assembler()->comisd(left, right);
+  Label is_false, is_true, done;
+  assembler()->j(PARITY_EVEN, &is_false, Assembler::kNearJump);  // NaN false;
+  assembler()->j(true_condition, &is_true, Assembler::kNearJump);
+  assembler()->Bind(&is_false);
+  assembler()->LoadObject(result, Bool::False());
+  assembler()->jmp(&done);
+  assembler()->Bind(&is_true);
+  assembler()->LoadObject(result, Bool::True());
+  assembler()->Bind(&done);
+}
+
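A note on the x86 semantics the two helpers above rely on (background, not part of the CL):

// comisd sets ZF, PF and CF; an unordered compare (either operand is NaN)
// sets all three, so PARITY_EVEN (PF == 1) identifies the NaN case.
// EmitDoubleCompareBool therefore routes NaN to the false result, while
// EmitDoubleCompareBranch sends NaN to the branch's true successor only for
// NOT_EQUAL, matching the IEEE 754 rule that NaN compares unequal to
// everything.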
+
+Condition FlowGraphCompiler::FlipCondition(Condition condition) {
+  switch (condition) {
+    case EQUAL: return EQUAL;
+    case NOT_EQUAL: return NOT_EQUAL;
+    case LESS: return GREATER;
+    case LESS_EQUAL: return GREATER_EQUAL;
+    case GREATER: return LESS;
+    case GREATER_EQUAL: return LESS_EQUAL;
+    case BELOW: return ABOVE;
+    case BELOW_EQUAL: return ABOVE_EQUAL;
+    case ABOVE: return BELOW;
+    case ABOVE_EQUAL: return BELOW_EQUAL;
+    default:
+      UNIMPLEMENTED();
+      return EQUAL;
+  }
+}
+
+
+bool FlowGraphCompiler::EvaluateCondition(Condition condition,
+                                          intptr_t left,
+                                          intptr_t right) {
+  const uintptr_t unsigned_left = static_cast<uintptr_t>(left);
+  const uintptr_t unsigned_right = static_cast<uintptr_t>(right);
+  switch (condition) {
+    case EQUAL: return left == right;
+    case NOT_EQUAL: return left != right;
+    case LESS: return left < right;
+    case LESS_EQUAL: return left <= right;
+    case GREATER: return left > right;
+    case GREATER_EQUAL: return left >= right;
+    case BELOW: return unsigned_left < unsigned_right;
+    case BELOW_EQUAL: return unsigned_left <= unsigned_right;
+    case ABOVE: return unsigned_left > unsigned_right;
+    case ABOVE_EQUAL: return unsigned_left >= unsigned_right;
+    default:
+      UNIMPLEMENTED();
+      return false;
+  }
+}
+
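The intended relationship between FlipCondition and EvaluateCondition (an assumption based on the mappings above, not asserted anywhere in this CL):

// FlipCondition(c) is the condition to test after swapping the operands, so
//   EvaluateCondition(c, left, right)
//       == EvaluateCondition(FlipCondition(c), right, left)
// should hold for every supported condition: LESS <-> GREATER,
// BELOW <-> ABOVE, and so on, while EQUAL and NOT_EQUAL map to themselves
// because they are symmetric in their operands.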
+
+FieldAddress FlowGraphCompiler::ElementAddressForIntIndex(intptr_t cid,
+                                                          Register array,
+                                                          intptr_t index) {
+  const int64_t disp =
+      static_cast<int64_t>(index) * ElementSizeFor(cid) + DataOffsetFor(cid);
+  ASSERT(Utils::IsInt(32, disp));
+  return FieldAddress(array, static_cast<int32_t>(disp));
+}
+
+
+FieldAddress FlowGraphCompiler::ElementAddressForRegIndex(intptr_t cid,
+                                                          Register array,
+                                                          Register index) {
+  // Note that index is smi-tagged, (i.e, times 2) for all arrays with element
+  // size > 1. For Uint8Array and OneByteString the index is expected to be
+  // untagged before accessing.
+  ASSERT(kSmiTagShift == 1);
+  switch (cid) {
+    case kArrayCid:
+    case kImmutableArrayCid:
+      return FieldAddress(
+          array, index, TIMES_HALF_WORD_SIZE, Array::data_offset());
+    case kFloat32ArrayCid:
+      return FieldAddress(array, index, TIMES_2, Float32Array::data_offset());
+    case kFloat64ArrayCid:
+      return FieldAddress(array, index, TIMES_4, Float64Array::data_offset());
+    case kUint8ArrayCid:
+      return FieldAddress(array, index, TIMES_1, Uint8Array::data_offset());
+    case kUint8ClampedArrayCid:
+      return
+          FieldAddress(array, index, TIMES_1, Uint8ClampedArray::data_offset());
+    case kOneByteStringCid:
+      return FieldAddress(array, index, TIMES_1, OneByteString::data_offset());
+    case kTwoByteStringCid:
+      return FieldAddress(array, index, TIMES_1, TwoByteString::data_offset());
+    default:
+      UNIMPLEMENTED();
+      return FieldAddress(SPREG, 0);
+  }
+}
+
+
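A worked example of the index scaling above (illustrative, using only the constants visible in the switch):

// With kSmiTagShift == 1 the index register holds 2 * i for logical index i.
// For a Float64Array element, TIMES_4 scaling yields a byte displacement of
// 4 * (2 * i) = 8 * i == i * sizeof(double); Float32Array uses TIMES_2 for
// 2 * (2 * i) = 4 * i.  Uint8Array and OneByteString use TIMES_1 on an index
// that, per the comment above, has already been untagged, while TwoByteString
// keeps the tagged index with TIMES_1, since 1 * (2 * i) = 2 * i is exactly
// the two-byte element offset.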
 #undef __
 #define __ compiler_->assembler()->


 void ParallelMoveResolver::EmitMove(int index) {
   MoveOperands* move = moves_[index];
   const Location source = move->src();
   const Location destination = move->dest();

   if (source.IsRegister()) {
     if (destination.IsRegister()) {
       __ movl(destination.reg(), source.reg());
     } else {
       ASSERT(destination.IsStackSlot());
       __ movl(destination.ToStackSlotAddress(), source.reg());
     }
   } else if (source.IsStackSlot()) {
     if (destination.IsRegister()) {
       __ movl(destination.reg(), source.ToStackSlotAddress());
     } else {
       ASSERT(destination.IsStackSlot());
       MoveMemoryToMemory(destination.ToStackSlotAddress(),
                          source.ToStackSlotAddress());
     }
-  } else if (source.IsXmmRegister()) {
-    if (destination.IsXmmRegister()) {
+  } else if (source.IsFpuRegister()) {
+    if (destination.IsFpuRegister()) {
       // Optimization manual recommends using MOVAPS for register
       // to register moves.
-      __ movaps(destination.xmm_reg(), source.xmm_reg());
+      __ movaps(destination.fpu_reg(), source.fpu_reg());
     } else {
       ASSERT(destination.IsDoubleStackSlot());
-      __ movsd(destination.ToStackSlotAddress(), source.xmm_reg());
+      __ movsd(destination.ToStackSlotAddress(), source.fpu_reg());
     }
   } else if (source.IsDoubleStackSlot()) {
-    if (destination.IsXmmRegister()) {
-      __ movsd(destination.xmm_reg(), source.ToStackSlotAddress());
+    if (destination.IsFpuRegister()) {
+      __ movsd(destination.fpu_reg(), source.ToStackSlotAddress());
     } else {
       ASSERT(destination.IsDoubleStackSlot());
       __ movsd(XMM0, source.ToStackSlotAddress());
       __ movsd(destination.ToStackSlotAddress(), XMM0);
     }
   } else {
     ASSERT(source.IsConstant());
     if (destination.IsRegister()) {
       const Object& constant = source.constant();
       if (constant.IsSmi() && (Smi::Cast(constant).Value() == 0)) {
(...skipping 17 matching lines...)
   const Location destination = move->dest();

   if (source.IsRegister() && destination.IsRegister()) {
     __ xchgl(destination.reg(), source.reg());
   } else if (source.IsRegister() && destination.IsStackSlot()) {
     Exchange(source.reg(), destination.ToStackSlotAddress());
   } else if (source.IsStackSlot() && destination.IsRegister()) {
     Exchange(destination.reg(), source.ToStackSlotAddress());
   } else if (source.IsStackSlot() && destination.IsStackSlot()) {
     Exchange(destination.ToStackSlotAddress(), source.ToStackSlotAddress());
-  } else if (source.IsXmmRegister() && destination.IsXmmRegister()) {
-    __ movaps(XMM0, source.xmm_reg());
-    __ movaps(source.xmm_reg(), destination.xmm_reg());
-    __ movaps(destination.xmm_reg(), XMM0);
-  } else if (source.IsXmmRegister() || destination.IsXmmRegister()) {
+  } else if (source.IsFpuRegister() && destination.IsFpuRegister()) {
+    __ movaps(XMM0, source.fpu_reg());
+    __ movaps(source.fpu_reg(), destination.fpu_reg());
+    __ movaps(destination.fpu_reg(), XMM0);
+  } else if (source.IsFpuRegister() || destination.IsFpuRegister()) {
     ASSERT(destination.IsDoubleStackSlot() || source.IsDoubleStackSlot());
-    XmmRegister reg = source.IsXmmRegister() ? source.xmm_reg()
-                                             : destination.xmm_reg();
-    Address slot_address = source.IsXmmRegister()
+    XmmRegister reg = source.IsFpuRegister() ? source.fpu_reg()
+                                             : destination.fpu_reg();
+    Address slot_address = source.IsFpuRegister()
         ? destination.ToStackSlotAddress()
         : source.ToStackSlotAddress();

     __ movsd(XMM0, slot_address);
     __ movsd(slot_address, reg);
     __ movaps(reg, XMM0);
   } else {
     UNREACHABLE();
   }

(...skipping 60 matching lines...)
   __ popl(ECX);
   __ popl(EAX);
 }


 #undef __

 }  // namespace dart

 #endif  // defined TARGET_ARCH_IA32