Chromium Code Reviews

Unified Diff: runtime/vm/flow_graph_compiler_x64.cc

Issue 11956004: Fix vm code base so that it can be built for --arch=simarm (no snapshot yet). (Closed) Base URL: http://dart.googlecode.com/svn/branches/bleeding_edge/dart/
Patch Set: Created 7 years, 11 months ago
-// Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file
+// Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
 // for details. All rights reserved. Use of this source code is governed by a
 // BSD-style license that can be found in the LICENSE file.
 
 #include "vm/globals.h"  // Needed here to get TARGET_ARCH_X64.
 #if defined(TARGET_ARCH_X64)
 
 #include "vm/flow_graph_compiler.h"
 
 #include "lib/error.h"
 #include "vm/ast_printer.h"
 #include "vm/dart_entry.h"
 #include "vm/il_printer.h"
 #include "vm/locations.h"
 #include "vm/object_store.h"
 #include "vm/parser.h"
 #include "vm/stub_code.h"
 #include "vm/symbols.h"
 
 namespace dart {
 
 DEFINE_FLAG(bool, trap_on_deoptimization, false, "Trap on deoptimization.");
 DECLARE_FLAG(int, optimization_counter_threshold);
 DECLARE_FLAG(bool, print_ast);
 DECLARE_FLAG(bool, print_scopes);
 DECLARE_FLAG(bool, use_sse41);
 
 
+FlowGraphCompiler::~FlowGraphCompiler() {
+  // BlockInfos are zone-allocated, so their destructors are not called.
+  // Verify the labels explicitly here.
+  for (int i = 0; i < block_info_.length(); ++i) {
+    ASSERT(!block_info_[i]->label.IsLinked());
+    ASSERT(!block_info_[i]->label.HasNear());
+  }
+}
+
+
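The destructor added here only verifies label state because BlockInfos come out of a zone allocator: zone memory is released in one bulk free and per-object destructors never run, so any invariant that must hold at teardown has to be checked explicitly. A minimal standalone sketch of that allocation pattern (Zone and BlockInfo below are simplified stand-ins, not the VM's classes):

    // Illustrative only: objects carved out of a zone are freed wholesale
    // when the zone dies; ~T() is never invoked for them.
    #include <cassert>
    #include <cstddef>
    #include <cstdlib>
    #include <new>

    class Zone {
     public:
      ~Zone() { free(buffer_); }  // One bulk free; no destructor calls.
      void* Allocate(size_t size) {
        assert(used_ + size <= kSize);  // Trivial bump allocator.
        void* result = buffer_ + used_;
        used_ += size;
        return result;
      }
     private:
      static const size_t kSize = 1 << 16;
      char* buffer_ = static_cast<char*>(malloc(kSize));
      size_t used_ = 0;
    };

    struct BlockInfo {
      bool linked = false;  // Stand-in for the Label state checked above.
    };

    int main() {
      Zone zone;
      // Placement-new into the zone; delete is never called on this object.
      BlockInfo* info = new (zone.Allocate(sizeof(BlockInfo))) BlockInfo();
      // So teardown checks must be explicit, as in ~FlowGraphCompiler().
      assert(!info->linked);
      return 0;
    }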
 bool FlowGraphCompiler::SupportsUnboxedMints() {
   return false;
 }
 
 
 void CompilerDeoptInfoWithStub::GenerateCode(FlowGraphCompiler* compiler,
                                              intptr_t stub_ix) {
   // Calls do not need stubs, they share a deoptimization trampoline.
   ASSERT(reason() != kDeoptAtCall);
   Assembler* assem = compiler->assembler();
(...skipping 1249 matching lines...)
   __ Drop(1);
   __ jmp(skip_call);
   __ Bind(&is_false);
   __ LoadObject(result, Bool::False());
   __ Drop(1);
   __ jmp(skip_call);
   __ Bind(&fall_through);
 }
 
 
-void FlowGraphCompiler::LoadDoubleOrSmiToXmm(XmmRegister result,
+void FlowGraphCompiler::LoadDoubleOrSmiToFpu(FpuRegister result,
                                              Register reg,
                                              Register temp,
                                              Label* not_double_or_smi) {
   Label is_smi, done;
   __ testq(reg, Immediate(kSmiTagMask));
   __ j(ZERO, &is_smi);
   __ CompareClassId(reg, kDoubleCid);
   __ j(NOT_EQUAL, not_double_or_smi);
   __ movsd(result, FieldAddress(reg, Double::value_offset()));
   __ jmp(&done);
   __ Bind(&is_smi);
   __ movq(temp, reg);
   __ SmiUntag(temp);
   __ cvtsi2sd(result, temp);
   __ Bind(&done);
 }
 
 
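LoadDoubleOrSmiToFpu accepts either a heap-allocated Double or a Smi: the testq against kSmiTagMask identifies a Smi by its clear low bit, SmiUntag shifts the payload back down, and cvtsi2sd converts it. A hedged model of the tagging scheme that pair relies on (the constants mirror the VM's names; the values are the ones ASSERT(kSmiTagShift == 1) later in this file implies):

    #include <cassert>
    #include <cstdint>

    // Sketch of the Smi encoding: tag bit 0 == 0 means "Smi", and the
    // payload is shifted left by one.
    const intptr_t kSmiTagMask = 1;
    const intptr_t kSmiTagShift = 1;

    bool IsSmi(intptr_t raw) { return (raw & kSmiTagMask) == 0; }
    intptr_t SmiUntag(intptr_t raw) { return raw >> kSmiTagShift; }

    int main() {
      const intptr_t raw = 7 << kSmiTagShift;  // SmiTag(7)
      assert(IsSmi(raw));  // what testq/j(ZERO, ...) decides in assembly
      const double d = static_cast<double>(SmiUntag(raw));  // cvtsi2sd
      assert(d == 7.0);
      return 0;
    }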
 void FlowGraphCompiler::SaveLiveRegisters(LocationSummary* locs) {
   // TODO(vegorov): consider saving only caller save (volatile) registers.
-  const intptr_t xmm_regs_count = locs->live_registers()->xmm_regs_count();
+  const intptr_t xmm_regs_count = locs->live_registers()->fpu_regs_count();
   if (xmm_regs_count > 0) {
     __ subq(RSP, Immediate(xmm_regs_count * kDoubleSize));
     // Store XMM registers with the lowest register number at the lowest
     // address.
     intptr_t offset = 0;
     for (intptr_t reg_idx = 0; reg_idx < kNumberOfXmmRegisters; ++reg_idx) {
       XmmRegister xmm_reg = static_cast<XmmRegister>(reg_idx);
-      if (locs->live_registers()->ContainsXmmRegister(xmm_reg)) {
+      if (locs->live_registers()->ContainsFpuRegister(xmm_reg)) {
         __ movsd(Address(RSP, offset), xmm_reg);
         offset += kDoubleSize;
       }
     }
     ASSERT(offset == (xmm_regs_count * kDoubleSize));
   }
 
   // Store general purpose registers with the highest register number at the
   // lowest address.
   for (intptr_t reg_idx = 0; reg_idx < kNumberOfCpuRegisters; ++reg_idx) {
     Register reg = static_cast<Register>(reg_idx);
     if (locs->live_registers()->ContainsRegister(reg)) {
       __ pushq(reg);
     }
   }
 }
 
 
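SaveLiveRegisters reserves a single block for the live FPU registers with one subq and fills it lowest register number at the lowest address, then pushes general purpose registers in ascending index order, which leaves the highest-numbered GPR at the lowest address; RestoreLiveRegisters below walks both layouts in reverse. A worked example of the FPU spill offsets, assuming kDoubleSize == 8 and a hypothetical live set {xmm1, xmm3}:

    #include <cstdio>

    // Mirrors the offset computation in SaveLiveRegisters for an assumed
    // live set; prints xmm1 -> [RSP + 0], xmm3 -> [RSP + 8].
    int main() {
      const int kDoubleSize = 8;
      const bool live[4] = {false, true, false, true};  // {xmm1, xmm3}
      int offset = 0;
      for (int reg = 0; reg < 4; ++reg) {  // ascending register number...
        if (live[reg]) {
          printf("xmm%d -> [RSP + %d]\n", reg, offset);  // ...lowest address first
          offset += kDoubleSize;
        }
      }
      return 0;
    }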
 void FlowGraphCompiler::RestoreLiveRegisters(LocationSummary* locs) {
   // General purpose registers have the highest register number at the
   // lowest address.
   for (intptr_t reg_idx = kNumberOfCpuRegisters - 1; reg_idx >= 0; --reg_idx) {
     Register reg = static_cast<Register>(reg_idx);
     if (locs->live_registers()->ContainsRegister(reg)) {
       __ popq(reg);
     }
   }
 
-  const intptr_t xmm_regs_count = locs->live_registers()->xmm_regs_count();
+  const intptr_t xmm_regs_count = locs->live_registers()->fpu_regs_count();
   if (xmm_regs_count > 0) {
     // XMM registers have the lowest register number at the lowest address.
     intptr_t offset = 0;
     for (intptr_t reg_idx = 0; reg_idx < kNumberOfXmmRegisters; ++reg_idx) {
       XmmRegister xmm_reg = static_cast<XmmRegister>(reg_idx);
-      if (locs->live_registers()->ContainsXmmRegister(xmm_reg)) {
+      if (locs->live_registers()->ContainsFpuRegister(xmm_reg)) {
         __ movsd(xmm_reg, Address(RSP, offset));
         offset += kDoubleSize;
       }
     }
     ASSERT(offset == (xmm_regs_count * kDoubleSize));
     __ addq(RSP, Immediate(offset));
   }
 }
 
 
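The xmm_regs_count/ContainsXmmRegister to fpu_regs_count/ContainsFpuRegister renames are the heart of this CL: shared compiler code now speaks an architecture-neutral FpuRegister vocabulary so the same logic can drive ARM's VFP registers in the simarm build. On x64 the alias is presumably trivial, along these lines (the enums below are stand-ins; the real typedefs live in the VM's per-architecture headers):

    // Hedged sketch of the per-architecture aliasing this CL moves toward.
    enum XmmRegister { XMM0, XMM1 };  // stand-in for the VM's enum
    enum DRegister { D0, D1 };        // stand-in for ARM VFP doubles

    #if defined(TARGET_ARCH_ARM)
    typedef DRegister FpuRegister;    // assumed for the simarm target
    #else
    typedef XmmRegister FpuRegister;  // SSE registers back doubles on x64
    #endif

    int main() {
      FpuRegister r = static_cast<FpuRegister>(0);
      return static_cast<int>(r);  // returns 0 either way
    }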
+struct CidTarget {
+  intptr_t cid;
+  Function* target;
+  intptr_t count;
+  CidTarget(intptr_t cid_arg,
+            Function* target_arg,
+            intptr_t count_arg)
+      : cid(cid_arg), target(target_arg), count(count_arg) {}
+};
+
+
+// Returns 'sorted' array in decreasing count order.
+// The expected number of elements to sort is less than 10.
+static void SortICDataByCount(const ICData& ic_data,
+                              GrowableArray<CidTarget>* sorted) {
+  ASSERT(ic_data.num_args_tested() == 1);
+  const intptr_t len = ic_data.NumberOfChecks();
+  sorted->Clear();
+
+  for (int i = 0; i < len; i++) {
+    sorted->Add(CidTarget(ic_data.GetReceiverClassIdAt(i),
+                          &Function::ZoneHandle(ic_data.GetTargetAt(i)),
+                          ic_data.GetCountAt(i)));
+  }
+  for (int i = 0; i < len; i++) {
+    intptr_t largest_ix = i;
+    for (int k = i + 1; k < len; k++) {
+      if ((*sorted)[largest_ix].count < (*sorted)[k].count) {
+        largest_ix = k;
+      }
+    }
+    if (i != largest_ix) {
+      // Swap.
+      CidTarget temp = (*sorted)[i];
+      (*sorted)[i] = (*sorted)[largest_ix];
+      (*sorted)[largest_ix] = temp;
+    }
+  }
+}
+
+
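SortICDataByCount is a plain selection sort, a reasonable choice given the comment's bound of fewer than ten checks per ICData. The same loop shape over a standalone vector, with Entry as a trimmed stand-in for CidTarget:

    #include <cstdio>
    #include <utility>
    #include <vector>

    struct Entry { long cid; long count; };  // trimmed stand-in for CidTarget

    // Same selection-sort shape as SortICDataByCount: decreasing count order.
    static void SortByCount(std::vector<Entry>* sorted) {
      const int len = static_cast<int>(sorted->size());
      for (int i = 0; i < len; i++) {
        int largest_ix = i;
        for (int k = i + 1; k < len; k++) {
          if ((*sorted)[largest_ix].count < (*sorted)[k].count) largest_ix = k;
        }
        if (i != largest_ix) std::swap((*sorted)[i], (*sorted)[largest_ix]);
      }
    }

    int main() {
      std::vector<Entry> v = {{10, 3}, {11, 42}, {12, 7}};
      SortByCount(&v);
      for (const Entry& e : v) printf("cid=%ld count=%ld\n", e.cid, e.count);
      return 0;  // cid=11 first: the hottest receiver class is tested first
    }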
+void FlowGraphCompiler::EmitTestAndCall(const ICData& ic_data,
+                                        Register class_id_reg,
+                                        intptr_t arg_count,
+                                        const Array& arg_names,
+                                        Label* deopt,
+                                        intptr_t deopt_id,
+                                        intptr_t token_index,
+                                        LocationSummary* locs) {
+  ASSERT(!ic_data.IsNull() && (ic_data.NumberOfChecks() > 0));
+  Label match_found;
+  const intptr_t len = ic_data.NumberOfChecks();
+  GrowableArray<CidTarget> sorted(len);
+  SortICDataByCount(ic_data, &sorted);
+  for (intptr_t i = 0; i < len; i++) {
+    const bool is_last_check = (i == (len - 1));
+    Label next_test;
+    assembler()->cmpl(class_id_reg, Immediate(sorted[i].cid));
+    if (is_last_check) {
+      assembler()->j(NOT_EQUAL, deopt);
+    } else {
+      assembler()->j(NOT_EQUAL, &next_test);
+    }
+    GenerateStaticCall(deopt_id,
+                       token_index,
+                       *sorted[i].target,
+                       arg_count,
+                       arg_names,
+                       locs);
+    if (!is_last_check) {
+      assembler()->jmp(&match_found);
+    }
+    assembler()->Bind(&next_test);
+  }
+  assembler()->Bind(&match_found);
+}
+
+
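EmitTestAndCall turns the sorted ICData into a class-id dispatch chain: compare the receiver's cid against each candidate (hottest first), call the matching static target and jump to match_found, and deoptimize only when the final check misses. The generated control flow modeled in plain C++ (the cids and targets are illustrative; the real code emits cmpl/j/call sequences):

    #include <cstdio>

    typedef void (*Target)();
    static void TargetA() { puts("A"); }
    static void TargetB() { puts("B"); }
    static void Deopt() { puts("deopt"); }

    static void Dispatch(long class_id) {
      // Sorted so the most frequently seen cid is tested first.
      const long cids[] = {42, 17};
      const Target targets[] = {&TargetA, &TargetB};
      for (int i = 0; i < 2; i++) {
        if (class_id == cids[i]) {  // cmpl + j(NOT_EQUAL, &next_test)
          targets[i]();             // GenerateStaticCall(...)
          return;                   // jmp(&match_found)
        }
      }
      Deopt();  // miss on the last check: j(NOT_EQUAL, deopt)
    }

    int main() {
      Dispatch(17);  // prints "B"
      Dispatch(5);   // prints "deopt"
      return 0;
    }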
+void FlowGraphCompiler::EmitDoubleCompareBranch(Condition true_condition,
+                                                FpuRegister left,
+                                                FpuRegister right,
+                                                BranchInstr* branch) {
+  ASSERT(branch != NULL);
+  assembler()->comisd(left, right);
+  BlockEntryInstr* nan_result = (true_condition == NOT_EQUAL) ?
+      branch->true_successor() : branch->false_successor();
+  assembler()->j(PARITY_EVEN, GetBlockLabel(nan_result));
+  branch->EmitBranchOnCondition(this, true_condition);
+}
+
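comisd reports an unordered result (either operand NaN) by setting the parity flag, so the PARITY_EVEN jump routes NaN to the successor a false comparison would take, except under NOT_EQUAL, where a NaN comparison is true; that is what the nan_result selection above encodes. The same routing in plain C++:

    #include <cassert>
    #include <cmath>

    // An unordered comisd result (PF set) makes every predicate false,
    // except != which is true when an operand is NaN.
    static bool Compare(double l, double r, char op) {
      if (std::isnan(l) || std::isnan(r)) return op == '!';  // PARITY_EVEN path
      switch (op) {
        case '<': return l < r;
        case '>': return l > r;
        case '=': return l == r;
        case '!': return l != r;
      }
      return false;
    }

    int main() {
      const double nan = std::nan("");
      assert(!Compare(nan, 1.0, '<'));
      assert(!Compare(nan, 1.0, '='));
      assert(Compare(nan, 1.0, '!'));  // NaN goes to the true successor
      return 0;
    }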
+
+
+void FlowGraphCompiler::EmitDoubleCompareBool(Condition true_condition,
+                                              FpuRegister left,
+                                              FpuRegister right,
+                                              Register result) {
+  assembler()->comisd(left, right);
+  Label is_false, is_true, done;
+  assembler()->j(PARITY_EVEN, &is_false, Assembler::kNearJump);  // NaN -> false.
+  assembler()->j(true_condition, &is_true, Assembler::kNearJump);
+  assembler()->Bind(&is_false);
+  assembler()->LoadObject(result, Bool::False());
+  assembler()->jmp(&done);
+  assembler()->Bind(&is_true);
+  assembler()->LoadObject(result, Bool::True());
+  assembler()->Bind(&done);
+}
+
+
+Condition FlowGraphCompiler::FlipCondition(Condition condition) {
+  switch (condition) {
+    case EQUAL: return EQUAL;
+    case NOT_EQUAL: return NOT_EQUAL;
+    case LESS: return GREATER;
+    case LESS_EQUAL: return GREATER_EQUAL;
+    case GREATER: return LESS;
+    case GREATER_EQUAL: return LESS_EQUAL;
+    case BELOW: return ABOVE;
+    case BELOW_EQUAL: return ABOVE_EQUAL;
+    case ABOVE: return BELOW;
+    case ABOVE_EQUAL: return BELOW_EQUAL;
+    default:
+      UNIMPLEMENTED();
+      return EQUAL;
+  }
+}
+
+
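FlipCondition answers "which condition holds once the operands are swapped", not "which condition is the negation": LESS maps to GREATER while EQUAL stays EQUAL. A quick check of the identities it relies on:

    #include <cassert>

    // (l < r) == (r > l): swapping operands flips the inequality but
    // leaves (in)equality untouched.
    int main() {
      const int l = 3, r = 5;
      assert((l < r) == (r > l));    // LESS       <-> GREATER
      assert((l <= r) == (r >= l));  // LESS_EQUAL <-> GREATER_EQUAL
      assert((l == r) == (r == l));  // EQUAL stays EQUAL
      assert((l != r) == (r != l));  // NOT_EQUAL stays NOT_EQUAL
      return 0;
    }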
+bool FlowGraphCompiler::EvaluateCondition(Condition condition,
+                                          intptr_t left,
+                                          intptr_t right) {
+  const uintptr_t unsigned_left = static_cast<uintptr_t>(left);
+  const uintptr_t unsigned_right = static_cast<uintptr_t>(right);
+  switch (condition) {
+    case EQUAL: return left == right;
+    case NOT_EQUAL: return left != right;
+    case LESS: return left < right;
+    case LESS_EQUAL: return left <= right;
+    case GREATER: return left > right;
+    case GREATER_EQUAL: return left >= right;
+    case BELOW: return unsigned_left < unsigned_right;
+    case BELOW_EQUAL: return unsigned_left <= unsigned_right;
+    case ABOVE: return unsigned_left > unsigned_right;
+    case ABOVE_EQUAL: return unsigned_left >= unsigned_right;
+    default:
+      UNIMPLEMENTED();
+      return false;
+  }
+}
+
+
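EvaluateCondition folds a comparison of two compile-time constants; the BELOW/ABOVE family is evaluated on the operands reinterpreted as unsigned words, which is the whole reason for the uintptr_t casts. For example, BELOW with left = -1 and right = 1 is false, because -1 reinterprets as the largest unsigned value:

    #include <cassert>
    #include <cstdint>

    // Worked example for the unsigned branch of EvaluateCondition.
    int main() {
      const intptr_t left = -1, right = 1;
      const uintptr_t ul = static_cast<uintptr_t>(left);  // wraps to UINTPTR_MAX
      const uintptr_t ur = static_cast<uintptr_t>(right);
      assert(left < right);  // signed LESS: true
      assert(!(ul < ur));    // unsigned BELOW: false
      return 0;
    }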
+FieldAddress FlowGraphCompiler::ElementAddressForIntIndex(intptr_t cid,
+                                                          Register array,
+                                                          intptr_t index) {
+  const int64_t disp =
+      static_cast<int64_t>(index) * ElementSizeFor(cid) + DataOffsetFor(cid);
+  ASSERT(Utils::IsInt(32, disp));
+  return FieldAddress(array, static_cast<int32_t>(disp));
+}
+
+
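With a constant index the whole address folds into a single displacement, index times element size plus the payload offset, and the ASSERT guards that it fits the 32-bit displacement field of an x64 addressing mode. A worked computation (the element size and data offset are assumed example values, not the VM's actual offsets):

    #include <cassert>
    #include <cstdint>

    // Mirrors the displacement computation in ElementAddressForIntIndex.
    int main() {
      const int64_t kElementSize = 8;  // e.g. one Float64Array element
      const int64_t kDataOffset = 16;  // hypothetical payload offset
      const int64_t index = 3;
      const int64_t disp = index * kElementSize + kDataOffset;
      assert(disp == 40);
      // Utils::IsInt(32, disp) guards the narrowing cast below.
      assert(disp == static_cast<int32_t>(disp));
      return 0;
    }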
+FieldAddress FlowGraphCompiler::ElementAddressForRegIndex(intptr_t cid,
+                                                          Register array,
+                                                          Register index) {
+  // Note that index is smi-tagged (i.e., times 2) for all arrays with element
+  // size > 1. For Uint8Array and OneByteString the index is expected to be
+  // untagged before accessing.
+  ASSERT(kSmiTagShift == 1);
+  switch (cid) {
+    case kArrayCid:
+    case kImmutableArrayCid:
+      return FieldAddress(
+          array, index, TIMES_HALF_WORD_SIZE, Array::data_offset());
+    case kFloat32ArrayCid:
+      return FieldAddress(array, index, TIMES_2, Float32Array::data_offset());
+    case kFloat64ArrayCid:
+      return FieldAddress(array, index, TIMES_4, Float64Array::data_offset());
+    case kUint8ArrayCid:
+      return FieldAddress(array, index, TIMES_1, Uint8Array::data_offset());
+    case kUint8ClampedArrayCid:
+      return
+          FieldAddress(array, index, TIMES_1, Uint8ClampedArray::data_offset());
+    case kOneByteStringCid:
+      return FieldAddress(array, index, TIMES_1, OneByteString::data_offset());
+    case kTwoByteStringCid:
+      return FieldAddress(array, index, TIMES_1, TwoByteString::data_offset());
+    default:
+      UNIMPLEMENTED();
+      return FieldAddress(SPREG, 0);
+  }
+}
+
+
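The scale factors look halved because the incoming index register still holds a smi-tagged value, i.e. twice the logical index: 8-byte elements pair with TIMES_4, 4-byte elements with TIMES_2, 2-byte elements with TIMES_1, and byte-sized arrays use TIMES_1 on an index the caller has already untagged. The arithmetic:

    #include <cassert>

    // Why a smi-tagged index (value << 1) wants half the element size
    // as its hardware scale factor.
    int main() {
      const int kSmiTagShift = 1;
      const int index = 5;
      const int tagged = index << kSmiTagShift;  // what the register holds
      assert(tagged * 4 == index * 8);  // Float64Array: TIMES_4
      assert(tagged * 2 == index * 4);  // Float32Array: TIMES_2
      assert(tagged * 1 == index * 2);  // TwoByteString: TIMES_1
      return 0;
    }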
 #undef __
 #define __ compiler_->assembler()->
 
 
 void ParallelMoveResolver::EmitMove(int index) {
   MoveOperands* move = moves_[index];
   const Location source = move->src();
   const Location destination = move->dest();
 
   if (source.IsRegister()) {
     if (destination.IsRegister()) {
       __ movq(destination.reg(), source.reg());
     } else {
       ASSERT(destination.IsStackSlot());
       __ movq(destination.ToStackSlotAddress(), source.reg());
     }
   } else if (source.IsStackSlot()) {
     if (destination.IsRegister()) {
       __ movq(destination.reg(), source.ToStackSlotAddress());
     } else {
       ASSERT(destination.IsStackSlot());
       MoveMemoryToMemory(destination.ToStackSlotAddress(),
                          source.ToStackSlotAddress());
     }
-  } else if (source.IsXmmRegister()) {
-    if (destination.IsXmmRegister()) {
+  } else if (source.IsFpuRegister()) {
+    if (destination.IsFpuRegister()) {
       // Optimization manual recommends using MOVAPS for register
       // to register moves.
-      __ movaps(destination.xmm_reg(), source.xmm_reg());
+      __ movaps(destination.fpu_reg(), source.fpu_reg());
     } else {
       ASSERT(destination.IsDoubleStackSlot());
-      __ movsd(destination.ToStackSlotAddress(), source.xmm_reg());
+      __ movsd(destination.ToStackSlotAddress(), source.fpu_reg());
     }
   } else if (source.IsDoubleStackSlot()) {
-    if (destination.IsXmmRegister()) {
-      __ movsd(destination.xmm_reg(), source.ToStackSlotAddress());
+    if (destination.IsFpuRegister()) {
+      __ movsd(destination.fpu_reg(), source.ToStackSlotAddress());
     } else {
       ASSERT(destination.IsDoubleStackSlot());
       __ movsd(XMM0, source.ToStackSlotAddress());
       __ movsd(destination.ToStackSlotAddress(), XMM0);
     }
   } else {
     ASSERT(source.IsConstant());
     if (destination.IsRegister()) {
       const Object& constant = source.constant();
       if (constant.IsSmi() && (Smi::Cast(constant).Value() == 0)) {
(...skipping 17 matching lines...)
   const Location destination = move->dest();
 
   if (source.IsRegister() && destination.IsRegister()) {
     __ xchgq(destination.reg(), source.reg());
   } else if (source.IsRegister() && destination.IsStackSlot()) {
     Exchange(source.reg(), destination.ToStackSlotAddress());
   } else if (source.IsStackSlot() && destination.IsRegister()) {
     Exchange(destination.reg(), source.ToStackSlotAddress());
   } else if (source.IsStackSlot() && destination.IsStackSlot()) {
     Exchange(destination.ToStackSlotAddress(), source.ToStackSlotAddress());
-  } else if (source.IsXmmRegister() && destination.IsXmmRegister()) {
-    __ movaps(XMM0, source.xmm_reg());
-    __ movaps(source.xmm_reg(), destination.xmm_reg());
-    __ movaps(destination.xmm_reg(), XMM0);
-  } else if (source.IsXmmRegister() || destination.IsXmmRegister()) {
+  } else if (source.IsFpuRegister() && destination.IsFpuRegister()) {
+    __ movaps(XMM0, source.fpu_reg());
+    __ movaps(source.fpu_reg(), destination.fpu_reg());
+    __ movaps(destination.fpu_reg(), XMM0);
+  } else if (source.IsFpuRegister() || destination.IsFpuRegister()) {
     ASSERT(destination.IsDoubleStackSlot() || source.IsDoubleStackSlot());
-    XmmRegister reg = source.IsXmmRegister() ? source.xmm_reg()
-                                             : destination.xmm_reg();
-    Address slot_address = source.IsXmmRegister()
+    XmmRegister reg = source.IsFpuRegister() ? source.fpu_reg()
+                                             : destination.fpu_reg();
+    Address slot_address = source.IsFpuRegister()
         ? destination.ToStackSlotAddress()
         : source.ToStackSlotAddress();
 
     __ movsd(XMM0, slot_address);
     __ movsd(slot_address, reg);
     __ movaps(reg, XMM0);
   } else {
     UNREACHABLE();
   }
 
(...skipping 34 matching lines...)
 void ParallelMoveResolver::Exchange(const Address& mem1, const Address& mem2) {
   __ Exchange(mem1, mem2);
 }
 
 
 #undef __
 
 }  // namespace dart
 
 #endif  // defined TARGET_ARCH_X64