Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(128)

Side by Side Diff: runtime/vm/flow_graph_compiler_ia32.cc

Issue 11956004: Fix vm code base so that it can be built for --arch=simarm (no snapshot yet). (Closed) Base URL: http://dart.googlecode.com/svn/branches/bleeding_edge/dart/
Patch Set: Created 7 years, 11 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
OLDNEW
1 // Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file 1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
2 // for details. All rights reserved. Use of this source code is governed by a 2 // for details. All rights reserved. Use of this source code is governed by a
3 // BSD-style license that can be found in the LICENSE file. 3 // BSD-style license that can be found in the LICENSE file.
4 4
5 #include "vm/globals.h" // Needed here to get TARGET_ARCH_IA32. 5 #include "vm/globals.h" // Needed here to get TARGET_ARCH_IA32.
6 #if defined(TARGET_ARCH_IA32) 6 #if defined(TARGET_ARCH_IA32)
7 7
8 #include "vm/flow_graph_compiler.h" 8 #include "vm/flow_graph_compiler.h"
9 9
10 #include "lib/error.h" 10 #include "lib/error.h"
11 #include "vm/ast_printer.h" 11 #include "vm/ast_printer.h"
12 #include "vm/dart_entry.h" 12 #include "vm/dart_entry.h"
13 #include "vm/il_printer.h" 13 #include "vm/il_printer.h"
14 #include "vm/locations.h" 14 #include "vm/locations.h"
15 #include "vm/object_store.h" 15 #include "vm/object_store.h"
16 #include "vm/parser.h" 16 #include "vm/parser.h"
17 #include "vm/stub_code.h" 17 #include "vm/stub_code.h"
18 #include "vm/symbols.h" 18 #include "vm/symbols.h"
19 19
20 namespace dart { 20 namespace dart {
21 21
22 DEFINE_FLAG(bool, trap_on_deoptimization, false, "Trap on deoptimization."); 22 DEFINE_FLAG(bool, trap_on_deoptimization, false, "Trap on deoptimization.");
23 DEFINE_FLAG(bool, unbox_mints, true, "Optimize 64-bit integer arithmetic."); 23 DEFINE_FLAG(bool, unbox_mints, true, "Optimize 64-bit integer arithmetic.");
24 DECLARE_FLAG(int, optimization_counter_threshold); 24 DECLARE_FLAG(int, optimization_counter_threshold);
25 DECLARE_FLAG(bool, print_ast); 25 DECLARE_FLAG(bool, print_ast);
26 DECLARE_FLAG(bool, print_scopes); 26 DECLARE_FLAG(bool, print_scopes);
27 27
28 28
29 FlowGraphCompiler::~FlowGraphCompiler() {
30 // BlockInfos are zone-allocated, so their destructors are not called.
31 // Verify the labels explicitly here.
32 for (int i = 0; i < block_info_.length(); ++i) {
33 ASSERT(!block_info_[i]->label.IsLinked());
34 ASSERT(!block_info_[i]->label.HasNear());
35 }
36 }
37
38
29 bool FlowGraphCompiler::SupportsUnboxedMints() { 39 bool FlowGraphCompiler::SupportsUnboxedMints() {
30 // Support unboxed mints when SSE 4.1 is available. 40 // Support unboxed mints when SSE 4.1 is available.
31 return FLAG_unbox_mints && CPUFeatures::sse4_1_supported(); 41 return FLAG_unbox_mints && CPUFeatures::sse4_1_supported();
32 } 42 }
33 43
34 44
35 void CompilerDeoptInfoWithStub::GenerateCode(FlowGraphCompiler* compiler, 45 void CompilerDeoptInfoWithStub::GenerateCode(FlowGraphCompiler* compiler,
36 intptr_t stub_ix) { 46 intptr_t stub_ix) {
37 // Calls do not need stubs, they share a deoptimization trampoline. 47 // Calls do not need stubs, they share a deoptimization trampoline.
38 ASSERT(reason() != kDeoptAtCall); 48 ASSERT(reason() != kDeoptAtCall);
(...skipping 1245 matching lines...) Expand 10 before | Expand all | Expand 10 after
1284 __ Drop(1); 1294 __ Drop(1);
1285 __ jmp(skip_call); 1295 __ jmp(skip_call);
1286 __ Bind(&is_false); 1296 __ Bind(&is_false);
1287 __ LoadObject(result, Bool::False()); 1297 __ LoadObject(result, Bool::False());
1288 __ Drop(1); 1298 __ Drop(1);
1289 __ jmp(skip_call); 1299 __ jmp(skip_call);
1290 __ Bind(&fall_through); 1300 __ Bind(&fall_through);
1291 } 1301 }
1292 1302
1293 1303
1294 void FlowGraphCompiler::LoadDoubleOrSmiToXmm(XmmRegister result, 1304 void FlowGraphCompiler::LoadDoubleOrSmiToFpu(FpuRegister result,
1295 Register reg, 1305 Register reg,
1296 Register temp, 1306 Register temp,
1297 Label* not_double_or_smi) { 1307 Label* not_double_or_smi) {
1298 Label is_smi, done; 1308 Label is_smi, done;
1299 __ testl(reg, Immediate(kSmiTagMask)); 1309 __ testl(reg, Immediate(kSmiTagMask));
1300 __ j(ZERO, &is_smi); 1310 __ j(ZERO, &is_smi);
1301 __ CompareClassId(reg, kDoubleCid, temp); 1311 __ CompareClassId(reg, kDoubleCid, temp);
1302 __ j(NOT_EQUAL, not_double_or_smi); 1312 __ j(NOT_EQUAL, not_double_or_smi);
1303 __ movsd(result, FieldAddress(reg, Double::value_offset())); 1313 __ movsd(result, FieldAddress(reg, Double::value_offset()));
1304 __ jmp(&done); 1314 __ jmp(&done);
1305 __ Bind(&is_smi); 1315 __ Bind(&is_smi);
1306 __ movl(temp, reg); 1316 __ movl(temp, reg);
1307 __ SmiUntag(temp); 1317 __ SmiUntag(temp);
1308 __ cvtsi2sd(result, temp); 1318 __ cvtsi2sd(result, temp);
1309 __ Bind(&done); 1319 __ Bind(&done);
1310 } 1320 }
1311 1321
1312 1322
1313 void FlowGraphCompiler::SaveLiveRegisters(LocationSummary* locs) { 1323 void FlowGraphCompiler::SaveLiveRegisters(LocationSummary* locs) {
1314 // TODO(vegorov): consider saving only caller save (volatile) registers. 1324 // TODO(vegorov): consider saving only caller save (volatile) registers.
1315 const intptr_t xmm_regs_count = locs->live_registers()->xmm_regs_count(); 1325 const intptr_t xmm_regs_count = locs->live_registers()->fpu_regs_count();
1316 if (xmm_regs_count > 0) { 1326 if (xmm_regs_count > 0) {
1317 __ subl(ESP, Immediate(xmm_regs_count * kDoubleSize)); 1327 __ subl(ESP, Immediate(xmm_regs_count * kDoubleSize));
1318 // Store XMM registers with the lowest register number at the lowest 1328 // Store XMM registers with the lowest register number at the lowest
1319 // address. 1329 // address.
1320 intptr_t offset = 0; 1330 intptr_t offset = 0;
1321 for (intptr_t reg_idx = 0; reg_idx < kNumberOfXmmRegisters; ++reg_idx) { 1331 for (intptr_t reg_idx = 0; reg_idx < kNumberOfXmmRegisters; ++reg_idx) {
1322 XmmRegister xmm_reg = static_cast<XmmRegister>(reg_idx); 1332 XmmRegister xmm_reg = static_cast<XmmRegister>(reg_idx);
1323 if (locs->live_registers()->ContainsXmmRegister(xmm_reg)) { 1333 if (locs->live_registers()->ContainsFpuRegister(xmm_reg)) {
1324 __ movsd(Address(ESP, offset), xmm_reg); 1334 __ movsd(Address(ESP, offset), xmm_reg);
1325 offset += kDoubleSize; 1335 offset += kDoubleSize;
1326 } 1336 }
1327 } 1337 }
1328 ASSERT(offset == (xmm_regs_count * kDoubleSize)); 1338 ASSERT(offset == (xmm_regs_count * kDoubleSize));
1329 } 1339 }
1330 1340
1331 // Store general purpose registers with the highest register number at the 1341 // Store general purpose registers with the highest register number at the
1332 // lowest address. 1342 // lowest address.
1333 for (intptr_t reg_idx = 0; reg_idx < kNumberOfCpuRegisters; ++reg_idx) { 1343 for (intptr_t reg_idx = 0; reg_idx < kNumberOfCpuRegisters; ++reg_idx) {
1334 Register reg = static_cast<Register>(reg_idx); 1344 Register reg = static_cast<Register>(reg_idx);
1335 if (locs->live_registers()->ContainsRegister(reg)) { 1345 if (locs->live_registers()->ContainsRegister(reg)) {
1336 __ pushl(reg); 1346 __ pushl(reg);
1337 } 1347 }
1338 } 1348 }
1339 } 1349 }
1340 1350
1341 1351
1342 void FlowGraphCompiler::RestoreLiveRegisters(LocationSummary* locs) { 1352 void FlowGraphCompiler::RestoreLiveRegisters(LocationSummary* locs) {
1343 // General purpose registers have the highest register number at the 1353 // General purpose registers have the highest register number at the
1344 // lowest address. 1354 // lowest address.
1345 for (intptr_t reg_idx = kNumberOfCpuRegisters - 1; reg_idx >= 0; --reg_idx) { 1355 for (intptr_t reg_idx = kNumberOfCpuRegisters - 1; reg_idx >= 0; --reg_idx) {
1346 Register reg = static_cast<Register>(reg_idx); 1356 Register reg = static_cast<Register>(reg_idx);
1347 if (locs->live_registers()->ContainsRegister(reg)) { 1357 if (locs->live_registers()->ContainsRegister(reg)) {
1348 __ popl(reg); 1358 __ popl(reg);
1349 } 1359 }
1350 } 1360 }
1351 1361
1352 const intptr_t xmm_regs_count = locs->live_registers()->xmm_regs_count(); 1362 const intptr_t xmm_regs_count = locs->live_registers()->fpu_regs_count();
1353 if (xmm_regs_count > 0) { 1363 if (xmm_regs_count > 0) {
1354 // XMM registers have the lowest register number at the lowest address. 1364 // XMM registers have the lowest register number at the lowest address.
1355 intptr_t offset = 0; 1365 intptr_t offset = 0;
1356 for (intptr_t reg_idx = 0; reg_idx < kNumberOfXmmRegisters; ++reg_idx) { 1366 for (intptr_t reg_idx = 0; reg_idx < kNumberOfXmmRegisters; ++reg_idx) {
1357 XmmRegister xmm_reg = static_cast<XmmRegister>(reg_idx); 1367 XmmRegister xmm_reg = static_cast<XmmRegister>(reg_idx);
1358 if (locs->live_registers()->ContainsXmmRegister(xmm_reg)) { 1368 if (locs->live_registers()->ContainsFpuRegister(xmm_reg)) {
1359 __ movsd(xmm_reg, Address(ESP, offset)); 1369 __ movsd(xmm_reg, Address(ESP, offset));
1360 offset += kDoubleSize; 1370 offset += kDoubleSize;
1361 } 1371 }
1362 } 1372 }
1363 ASSERT(offset == (xmm_regs_count * kDoubleSize)); 1373 ASSERT(offset == (xmm_regs_count * kDoubleSize));
1364 __ addl(ESP, Immediate(offset)); 1374 __ addl(ESP, Immediate(offset));
1365 } 1375 }
1366 } 1376 }
1367 1377
1368 1378
1379 struct CidTarget {
1380 intptr_t cid;
1381 Function* target;
1382 intptr_t count;
1383 CidTarget(intptr_t cid_arg,
1384 Function* target_arg,
1385 intptr_t count_arg)
1386 : cid(cid_arg), target(target_arg), count(count_arg) {}
1387 };
1388
1389
1390 // Returns 'sorted' array in decreasing count order.
1391 // The expected number of elements to sort is less than 10.
1392 static void SortICDataByCount(const ICData& ic_data,
1393 GrowableArray<CidTarget>* sorted) {
1394 ASSERT(ic_data.num_args_tested() == 1);
1395 const intptr_t len = ic_data.NumberOfChecks();
1396 sorted->Clear();
1397
1398 for (int i = 0; i < len; i++) {
1399 sorted->Add(CidTarget(ic_data.GetReceiverClassIdAt(i),
1400 &Function::ZoneHandle(ic_data.GetTargetAt(i)),
1401 ic_data.GetCountAt(i)));
1402 }
1403 for (int i = 0; i < len; i++) {
1404 intptr_t largest_ix = i;
1405 for (int k = i + 1; k < len; k++) {
1406 if ((*sorted)[largest_ix].count < (*sorted)[k].count) {
1407 largest_ix = k;
1408 }
1409 }
1410 if (i != largest_ix) {
1411 // Swap.
1412 CidTarget temp = (*sorted)[i];
1413 (*sorted)[i] = (*sorted)[largest_ix];
1414 (*sorted)[largest_ix] = temp;
1415 }
1416 }
1417 }
srdjan 2013/01/16 19:06:30 Put SortICDataByCount into shared FlowGraphCompiler code.
regis 2013/01/16 23:14:42 Added TODO(regis): Make static member, move to shared FlowGraphCompiler code.
1418
1419
1420 void FlowGraphCompiler::EmitTestAndCall(const ICData& ic_data,
1421 Register class_id_reg,
1422 intptr_t arg_count,
1423 const Array& arg_names,
1424 Label* deopt,
1425 intptr_t deopt_id,
1426 intptr_t token_index,
1427 LocationSummary* locs) {
1428 ASSERT(!ic_data.IsNull() && (ic_data.NumberOfChecks() > 0));
1429 Label match_found;
1430 const intptr_t len = ic_data.NumberOfChecks();
1431 GrowableArray<CidTarget> sorted(len);
1432 SortICDataByCount(ic_data, &sorted);
1433 for (intptr_t i = 0; i < len; i++) {
1434 const bool is_last_check = (i == (len - 1));
1435 Label next_test;
1436 assembler()->cmpl(class_id_reg, Immediate(sorted[i].cid));
1437 if (is_last_check) {
1438 assembler()->j(NOT_EQUAL, deopt);
1439 } else {
1440 assembler()->j(NOT_EQUAL, &next_test);
1441 }
1442 GenerateStaticCall(deopt_id,
1443 token_index,
1444 *sorted[i].target,
1445 arg_count,
1446 arg_names,
1447 locs);
1448 if (!is_last_check) {
1449 assembler()->jmp(&match_found);
1450 }
1451 assembler()->Bind(&next_test);
1452 }
1453 assembler()->Bind(&match_found);
1454 }
1455
1456
1457 void FlowGraphCompiler::EmitDoubleCompareBranch(Condition true_condition,
1458 FpuRegister left,
1459 FpuRegister right,
1460 BranchInstr* branch) {
1461 ASSERT(branch != NULL);
1462 assembler()->comisd(left, right);
1463 BlockEntryInstr* nan_result = (true_condition == NOT_EQUAL) ?
1464 branch->true_successor() : branch->false_successor();
1465 assembler()->j(PARITY_EVEN, GetBlockLabel(nan_result));
1466 branch->EmitBranchOnCondition(this, true_condition);
1467 }
1468
1469
1470
1471 void FlowGraphCompiler::EmitDoubleCompareBool(Condition true_condition,
1472 FpuRegister left,
1473 FpuRegister right,
1474 Register result) {
1475 assembler()->comisd(left, right);
1476 Label is_false, is_true, done;
1477 assembler()->j(PARITY_EVEN, &is_false, Assembler::kNearJump); // NaN false;
1478 assembler()->j(true_condition, &is_true, Assembler::kNearJump);
1479 assembler()->Bind(&is_false);
1480 assembler()->LoadObject(result, Bool::False());
1481 assembler()->jmp(&done);
1482 assembler()->Bind(&is_true);
1483 assembler()->LoadObject(result, Bool::True());
1484 assembler()->Bind(&done);
1485 }
1486
1487
1488 Condition FlowGraphCompiler::FlipCondition(Condition condition) {
1489 switch (condition) {
1490 case EQUAL: return EQUAL;
1491 case NOT_EQUAL: return NOT_EQUAL;
1492 case LESS: return GREATER;
1493 case LESS_EQUAL: return GREATER_EQUAL;
1494 case GREATER: return LESS;
1495 case GREATER_EQUAL: return LESS_EQUAL;
1496 case BELOW: return ABOVE;
1497 case BELOW_EQUAL: return ABOVE_EQUAL;
1498 case ABOVE: return BELOW;
1499 case ABOVE_EQUAL: return BELOW_EQUAL;
1500 default:
1501 UNIMPLEMENTED();
1502 return EQUAL;
1503 }
1504 }
1505
1506
1507 bool FlowGraphCompiler::EvaluateCondition(Condition condition,
1508 intptr_t left,
1509 intptr_t right) {
1510 const uintptr_t unsigned_left = static_cast<uintptr_t>(left);
1511 const uintptr_t unsigned_right = static_cast<uintptr_t>(right);
1512 switch (condition) {
1513 case EQUAL: return left == right;
1514 case NOT_EQUAL: return left != right;
1515 case LESS: return left < right;
1516 case LESS_EQUAL: return left <= right;
1517 case GREATER: return left > right;
1518 case GREATER_EQUAL: return left >= right;
1519 case BELOW: return unsigned_left < unsigned_right;
1520 case BELOW_EQUAL: return unsigned_left <= unsigned_right;
1521 case ABOVE: return unsigned_left > unsigned_right;
1522 case ABOVE_EQUAL: return unsigned_left >= unsigned_right;
1523 default:
1524 UNIMPLEMENTED();
1525 return false;
1526 }
1527 }
1528
1529
1530 FieldAddress FlowGraphCompiler::ElementAddressForIntIndex(intptr_t cid,
1531 Register array,
1532 intptr_t index) {
1533 const int64_t disp =
1534 static_cast<int64_t>(index) * ElementSizeFor(cid) + DataOffsetFor(cid);
1535 ASSERT(Utils::IsInt(32, disp));
1536 return FieldAddress(array, static_cast<int32_t>(disp));
1537 }
1538
1539
1540 FieldAddress FlowGraphCompiler::ElementAddressForRegIndex(intptr_t cid,
1541 Register array,
1542 Register index) {
1543 // Note that index is smi-tagged, (i.e, times 2) for all arrays with element
1544 // size > 1. For Uint8Array and OneByteString the index is expected to be
1545 // untagged before accessing.
1546 ASSERT(kSmiTagShift == 1);
1547 switch (cid) {
1548 case kArrayCid:
1549 case kImmutableArrayCid:
1550 return FieldAddress(
1551 array, index, TIMES_HALF_WORD_SIZE, Array::data_offset());
1552 case kFloat32ArrayCid:
1553 return FieldAddress(array, index, TIMES_2, Float32Array::data_offset());
1554 case kFloat64ArrayCid:
1555 return FieldAddress(array, index, TIMES_4, Float64Array::data_offset());
1556 case kUint8ArrayCid:
1557 return FieldAddress(array, index, TIMES_1, Uint8Array::data_offset());
1558 case kUint8ClampedArrayCid:
1559 return
1560 FieldAddress(array, index, TIMES_1, Uint8ClampedArray::data_offset());
1561 case kOneByteStringCid:
1562 return FieldAddress(array, index, TIMES_1, OneByteString::data_offset());
1563 case kTwoByteStringCid:
1564 return FieldAddress(array, index, TIMES_1, TwoByteString::data_offset());
1565 default:
1566 UNIMPLEMENTED();
1567 return FieldAddress(SPREG, 0);
1568 }
1569 }
1570
1571
1369 #undef __ 1572 #undef __
1370 #define __ compiler_->assembler()-> 1573 #define __ compiler_->assembler()->
1371 1574
1372 1575
1373 void ParallelMoveResolver::EmitMove(int index) { 1576 void ParallelMoveResolver::EmitMove(int index) {
1374 MoveOperands* move = moves_[index]; 1577 MoveOperands* move = moves_[index];
1375 const Location source = move->src(); 1578 const Location source = move->src();
1376 const Location destination = move->dest(); 1579 const Location destination = move->dest();
1377 1580
1378 if (source.IsRegister()) { 1581 if (source.IsRegister()) {
1379 if (destination.IsRegister()) { 1582 if (destination.IsRegister()) {
1380 __ movl(destination.reg(), source.reg()); 1583 __ movl(destination.reg(), source.reg());
1381 } else { 1584 } else {
1382 ASSERT(destination.IsStackSlot()); 1585 ASSERT(destination.IsStackSlot());
1383 __ movl(destination.ToStackSlotAddress(), source.reg()); 1586 __ movl(destination.ToStackSlotAddress(), source.reg());
1384 } 1587 }
1385 } else if (source.IsStackSlot()) { 1588 } else if (source.IsStackSlot()) {
1386 if (destination.IsRegister()) { 1589 if (destination.IsRegister()) {
1387 __ movl(destination.reg(), source.ToStackSlotAddress()); 1590 __ movl(destination.reg(), source.ToStackSlotAddress());
1388 } else { 1591 } else {
1389 ASSERT(destination.IsStackSlot()); 1592 ASSERT(destination.IsStackSlot());
1390 MoveMemoryToMemory(destination.ToStackSlotAddress(), 1593 MoveMemoryToMemory(destination.ToStackSlotAddress(),
1391 source.ToStackSlotAddress()); 1594 source.ToStackSlotAddress());
1392 } 1595 }
1393 } else if (source.IsXmmRegister()) { 1596 } else if (source.IsFpuRegister()) {
1394 if (destination.IsXmmRegister()) { 1597 if (destination.IsFpuRegister()) {
1395 // Optimization manual recommends using MOVAPS for register 1598 // Optimization manual recommends using MOVAPS for register
1396 // to register moves. 1599 // to register moves.
1397 __ movaps(destination.xmm_reg(), source.xmm_reg()); 1600 __ movaps(destination.fpu_reg(), source.fpu_reg());
1398 } else { 1601 } else {
1399 ASSERT(destination.IsDoubleStackSlot()); 1602 ASSERT(destination.IsDoubleStackSlot());
1400 __ movsd(destination.ToStackSlotAddress(), source.xmm_reg()); 1603 __ movsd(destination.ToStackSlotAddress(), source.fpu_reg());
1401 } 1604 }
1402 } else if (source.IsDoubleStackSlot()) { 1605 } else if (source.IsDoubleStackSlot()) {
1403 if (destination.IsXmmRegister()) { 1606 if (destination.IsFpuRegister()) {
1404 __ movsd(destination.xmm_reg(), source.ToStackSlotAddress()); 1607 __ movsd(destination.fpu_reg(), source.ToStackSlotAddress());
1405 } else { 1608 } else {
1406 ASSERT(destination.IsDoubleStackSlot()); 1609 ASSERT(destination.IsDoubleStackSlot());
1407 __ movsd(XMM0, source.ToStackSlotAddress()); 1610 __ movsd(XMM0, source.ToStackSlotAddress());
1408 __ movsd(destination.ToStackSlotAddress(), XMM0); 1611 __ movsd(destination.ToStackSlotAddress(), XMM0);
1409 } 1612 }
1410 } else { 1613 } else {
1411 ASSERT(source.IsConstant()); 1614 ASSERT(source.IsConstant());
1412 if (destination.IsRegister()) { 1615 if (destination.IsRegister()) {
1413 const Object& constant = source.constant(); 1616 const Object& constant = source.constant();
1414 if (constant.IsSmi() && (Smi::Cast(constant).Value() == 0)) { 1617 if (constant.IsSmi() && (Smi::Cast(constant).Value() == 0)) {
(...skipping 17 matching lines...) Expand all
1432 const Location destination = move->dest(); 1635 const Location destination = move->dest();
1433 1636
1434 if (source.IsRegister() && destination.IsRegister()) { 1637 if (source.IsRegister() && destination.IsRegister()) {
1435 __ xchgl(destination.reg(), source.reg()); 1638 __ xchgl(destination.reg(), source.reg());
1436 } else if (source.IsRegister() && destination.IsStackSlot()) { 1639 } else if (source.IsRegister() && destination.IsStackSlot()) {
1437 Exchange(source.reg(), destination.ToStackSlotAddress()); 1640 Exchange(source.reg(), destination.ToStackSlotAddress());
1438 } else if (source.IsStackSlot() && destination.IsRegister()) { 1641 } else if (source.IsStackSlot() && destination.IsRegister()) {
1439 Exchange(destination.reg(), source.ToStackSlotAddress()); 1642 Exchange(destination.reg(), source.ToStackSlotAddress());
1440 } else if (source.IsStackSlot() && destination.IsStackSlot()) { 1643 } else if (source.IsStackSlot() && destination.IsStackSlot()) {
1441 Exchange(destination.ToStackSlotAddress(), source.ToStackSlotAddress()); 1644 Exchange(destination.ToStackSlotAddress(), source.ToStackSlotAddress());
1442 } else if (source.IsXmmRegister() && destination.IsXmmRegister()) { 1645 } else if (source.IsFpuRegister() && destination.IsFpuRegister()) {
1443 __ movaps(XMM0, source.xmm_reg()); 1646 __ movaps(XMM0, source.fpu_reg());
1444 __ movaps(source.xmm_reg(), destination.xmm_reg()); 1647 __ movaps(source.fpu_reg(), destination.fpu_reg());
1445 __ movaps(destination.xmm_reg(), XMM0); 1648 __ movaps(destination.fpu_reg(), XMM0);
1446 } else if (source.IsXmmRegister() || destination.IsXmmRegister()) { 1649 } else if (source.IsFpuRegister() || destination.IsFpuRegister()) {
1447 ASSERT(destination.IsDoubleStackSlot() || source.IsDoubleStackSlot()); 1650 ASSERT(destination.IsDoubleStackSlot() || source.IsDoubleStackSlot());
1448 XmmRegister reg = source.IsXmmRegister() ? source.xmm_reg() 1651 XmmRegister reg = source.IsFpuRegister() ? source.fpu_reg()
1449 : destination.xmm_reg(); 1652 : destination.fpu_reg();
1450 Address slot_address = source.IsXmmRegister() 1653 Address slot_address = source.IsFpuRegister()
1451 ? destination.ToStackSlotAddress() 1654 ? destination.ToStackSlotAddress()
1452 : source.ToStackSlotAddress(); 1655 : source.ToStackSlotAddress();
1453 1656
1454 __ movsd(XMM0, slot_address); 1657 __ movsd(XMM0, slot_address);
1455 __ movsd(slot_address, reg); 1658 __ movsd(slot_address, reg);
1456 __ movaps(reg, XMM0); 1659 __ movaps(reg, XMM0);
1457 } else { 1660 } else {
1458 UNREACHABLE(); 1661 UNREACHABLE();
1459 } 1662 }
1460 1663
(...skipping 60 matching lines...) Expand 10 before | Expand all | Expand 10 after
1521 __ popl(ECX); 1724 __ popl(ECX);
1522 __ popl(EAX); 1725 __ popl(EAX);
1523 } 1726 }
1524 1727
1525 1728
1526 #undef __ 1729 #undef __
1527 1730
1528 } // namespace dart 1731 } // namespace dart
1529 1732
1530 #endif // defined TARGET_ARCH_IA32 1733 #endif // defined TARGET_ARCH_IA32
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698