OLD | NEW |
1 // Copyright 2006-2009 the V8 project authors. All rights reserved. | 1 // Copyright 2006-2009 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 806 matching lines...)
817 private: | 817 private: |
818 Token::Value op_; | 818 Token::Value op_; |
819 Register dst_; | 819 Register dst_; |
820 Register left_; | 820 Register left_; |
821 Register right_; | 821 Register right_; |
822 OverwriteMode mode_; | 822 OverwriteMode mode_; |
823 }; | 823 }; |
824 | 824 |
825 | 825 |
826 void DeferredInlineBinaryOperation::Generate() { | 826 void DeferredInlineBinaryOperation::Generate() { |
827 __ push(left_); | 827 GenericBinaryOpStub stub(op_, mode_, NO_SMI_CODE_IN_STUB); |
828 __ push(right_); | 828 stub.GenerateCall(masm_, left_, right_); |
829 GenericBinaryOpStub stub(op_, mode_, SMI_CODE_INLINED); | |
830 __ CallStub(&stub); | |
831 if (!dst_.is(eax)) __ mov(dst_, eax); | 829 if (!dst_.is(eax)) __ mov(dst_, eax); |
832 } | 830 } |
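Note: the deferred slow path above no longer pushes its operands before calling the stub; it delegates to GenericBinaryOpStub::GenerateCall (added below at new lines 6513-6563), which picks between the stack and the new register calling convention. Which operations qualify for registers is decided by ArgsInRegistersSupported(), defined in the header outside this diff. A minimal sketch of the two call shapes, assuming the names used in this patch:

    // Stack convention (stub may contain smi code):
    //   __ push(left_); __ push(right_); __ CallStub(&stub);      // result in eax
    // Register convention (NO_SMI_CODE_IN_STUB):
    //   __ mov(edx, left_); __ mov(eax, right_); __ CallStub(&stub);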
833 | 831 |
834 | 832 |
835 void CodeGenerator::GenericBinaryOperation(Token::Value op, | 833 void CodeGenerator::GenericBinaryOperation(Token::Value op, |
836 SmiAnalysis* type, | 834 SmiAnalysis* type, |
837 OverwriteMode overwrite_mode) { | 835 OverwriteMode overwrite_mode) { |
838 Comment cmnt(masm_, "[ BinaryOperation"); | 836 Comment cmnt(masm_, "[ BinaryOperation"); |
839 Comment cmnt_token(masm_, Token::String(op)); | 837 Comment cmnt_token(masm_, Token::String(op)); |
840 | 838 |
841 if (op == Token::COMMA) { | 839 if (op == Token::COMMA) { |
842 // Simply discard left value. | 840 // Simply discard left value. |
843 frame_->Nip(1); | 841 frame_->Nip(1); |
844 return; | 842 return; |
845 } | 843 } |
846 | 844 |
847 // Set the flags based on the operation, type and loop nesting level. | 845 // Set the flags based on the operation, type and loop nesting level. |
848 GenericBinaryFlags flags; | 846 GenericBinaryFlags flags; |
849 switch (op) { | 847 switch (op) { |
850 case Token::BIT_OR: | 848 case Token::BIT_OR: |
851 case Token::BIT_AND: | 849 case Token::BIT_AND: |
852 case Token::BIT_XOR: | 850 case Token::BIT_XOR: |
853 case Token::SHL: | 851 case Token::SHL: |
854 case Token::SHR: | 852 case Token::SHR: |
855 case Token::SAR: | 853 case Token::SAR: |
856 // Bit operations are assumed to likely operate on Smis. Still, only | 854 // Bit operations are assumed to likely operate on Smis. Still, only |
857 // generate the inline Smi check code if this operation is part of a loop. | 855 // generate the inline Smi check code if this operation is part of a loop. |
858 flags = (loop_nesting() > 0) | 856 flags = (loop_nesting() > 0) |
859 ? SMI_CODE_INLINED | 857 ? NO_SMI_CODE_IN_STUB |
860 : SMI_CODE_IN_STUB; | 858 : NO_GENERIC_BINARY_FLAGS; |
861 break; | 859 break; |
862 | 860 |
863 default: | 861 default: |
864 // By default only inline the Smi check code for likely smis if this | 862 // By default only inline the Smi check code for likely smis if this |
865 // operation is part of a loop. | 863 // operation is part of a loop. |
866 flags = ((loop_nesting() > 0) && type->IsLikelySmi()) | 864 flags = ((loop_nesting() > 0) && type->IsLikelySmi()) |
867 ? SMI_CODE_INLINED | 865 ? NO_SMI_CODE_IN_STUB |
868 : SMI_CODE_IN_STUB; | 866 : NO_GENERIC_BINARY_FLAGS; |
869 break; | 867 break; |
870 } | 868 } |
871 | 869 |
872 Result right = frame_->Pop(); | 870 Result right = frame_->Pop(); |
873 Result left = frame_->Pop(); | 871 Result left = frame_->Pop(); |
874 | 872 |
875 if (op == Token::ADD) { | 873 if (op == Token::ADD) { |
876 bool left_is_string = left.is_constant() && left.handle()->IsString(); | 874 bool left_is_string = left.is_constant() && left.handle()->IsString(); |
877 bool right_is_string = right.is_constant() && right.handle()->IsString(); | 875 bool right_is_string = right.is_constant() && right.handle()->IsString(); |
878 if (left_is_string || right_is_string) { | 876 if (left_is_string || right_is_string) { |
(...skipping 38 matching lines...)
917 } else if (right_is_smi) { | 915 } else if (right_is_smi) { |
918 ConstantSmiBinaryOperation(op, &left, right.handle(), | 916 ConstantSmiBinaryOperation(op, &left, right.handle(), |
919 type, false, overwrite_mode); | 917 type, false, overwrite_mode); |
920 return; | 918 return; |
921 } else if (left_is_smi) { | 919 } else if (left_is_smi) { |
922 ConstantSmiBinaryOperation(op, &right, left.handle(), | 920 ConstantSmiBinaryOperation(op, &right, left.handle(), |
923 type, true, overwrite_mode); | 921 type, true, overwrite_mode); |
924 return; | 922 return; |
925 } | 923 } |
926 | 924 |
927 if (flags == SMI_CODE_INLINED && !generate_no_smi_code) { | 925 if (((flags & NO_SMI_CODE_IN_STUB) != 0) && !generate_no_smi_code) { |
928 LikelySmiBinaryOperation(op, &left, &right, overwrite_mode); | 926 LikelySmiBinaryOperation(op, &left, &right, overwrite_mode); |
929 } else { | 927 } else { |
930 frame_->Push(&left); | 928 frame_->Push(&left); |
931 frame_->Push(&right); | 929 frame_->Push(&right); |
932 // If we know the arguments aren't smis, use the binary operation stub | 930 // If we know the arguments aren't smis, use the binary operation stub |
933 // that does not check for the fast smi case. | 931 // that does not check for the fast smi case. |
934 // The same stub is used for NO_SMI_CODE and SMI_CODE_INLINED. | |
935 if (generate_no_smi_code) { | 932 if (generate_no_smi_code) { |
936 flags = SMI_CODE_INLINED; | 933 flags = NO_SMI_CODE_IN_STUB; |
937 } | 934 } |
938 GenericBinaryOpStub stub(op, overwrite_mode, flags); | 935 GenericBinaryOpStub stub(op, overwrite_mode, flags); |
939 Result answer = frame_->CallStub(&stub, 2); | 936 Result answer = frame_->CallStub(&stub, 2); |
940 frame_->Push(&answer); | 937 frame_->Push(&answer); |
941 } | 938 } |
942 } | 939 } |
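The test at new line 925 masks the flags with NO_SMI_CODE_IN_STUB instead of comparing for equality, which only makes sense if GenericBinaryFlags is now a bitmask. A hedged sketch of the encoding this patch appears to assume (the enum itself lives in the header and is not part of this diff):

    enum GenericBinaryFlags {
      NO_GENERIC_BINARY_FLAGS = 0,   // Default: smi code is generated in the stub.
      NO_SMI_CODE_IN_STUB = 1 << 0   // The caller inlines the fast smi case itself.
    };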
943 | 940 |
944 | 941 |
945 bool CodeGenerator::FoldConstantSmis(Token::Value op, int left, int right) { | 942 bool CodeGenerator::FoldConstantSmis(Token::Value op, int left, int right) { |
946 Object* answer_object = Heap::undefined_value(); | 943 Object* answer_object = Heap::undefined_value(); |
(...skipping 422 matching lines...)
1369 private: | 1366 private: |
1370 Token::Value op_; | 1367 Token::Value op_; |
1371 Register dst_; | 1368 Register dst_; |
1372 Register src_; | 1369 Register src_; |
1373 Smi* value_; | 1370 Smi* value_; |
1374 OverwriteMode overwrite_mode_; | 1371 OverwriteMode overwrite_mode_; |
1375 }; | 1372 }; |
1376 | 1373 |
1377 | 1374 |
1378 void DeferredInlineSmiOperation::Generate() { | 1375 void DeferredInlineSmiOperation::Generate() { |
1379 __ push(src_); | |
1380 __ push(Immediate(value_)); | |
1381 // For mod we don't generate all the Smi code inline. | 1376 // For mod we don't generate all the Smi code inline. |
1382 GenericBinaryOpStub stub( | 1377 GenericBinaryOpStub stub( |
1383 op_, | 1378 op_, |
1384 overwrite_mode_, | 1379 overwrite_mode_, |
1385 (op_ == Token::MOD) ? SMI_CODE_IN_STUB : SMI_CODE_INLINED); | 1380 (op_ == Token::MOD) ? NO_GENERIC_BINARY_FLAGS : NO_SMI_CODE_IN_STUB); |
1386 __ CallStub(&stub); | 1381 stub.GenerateCall(masm_, src_, value_); |
1387 if (!dst_.is(eax)) __ mov(dst_, eax); | 1382 if (!dst_.is(eax)) __ mov(dst_, eax); |
1388 } | 1383 } |
1389 | 1384 |
1390 | 1385 |
1391 // Call the appropriate binary operation stub to compute value op src | 1386 // Call the appropriate binary operation stub to compute value op src |
1392 // and leave the result in dst. | 1387 // and leave the result in dst. |
1393 class DeferredInlineSmiOperationReversed: public DeferredCode { | 1388 class DeferredInlineSmiOperationReversed: public DeferredCode { |
1394 public: | 1389 public: |
1395 DeferredInlineSmiOperationReversed(Token::Value op, | 1390 DeferredInlineSmiOperationReversed(Token::Value op, |
1396 Register dst, | 1391 Register dst, |
(...skipping 13 matching lines...)
1410 private: | 1405 private: |
1411 Token::Value op_; | 1406 Token::Value op_; |
1412 Register dst_; | 1407 Register dst_; |
1413 Smi* value_; | 1408 Smi* value_; |
1414 Register src_; | 1409 Register src_; |
1415 OverwriteMode overwrite_mode_; | 1410 OverwriteMode overwrite_mode_; |
1416 }; | 1411 }; |
1417 | 1412 |
1418 | 1413 |
1419 void DeferredInlineSmiOperationReversed::Generate() { | 1414 void DeferredInlineSmiOperationReversed::Generate() { |
1420 __ push(Immediate(value_)); | 1415 GenericBinaryOpStub igostub(op_, overwrite_mode_, NO_SMI_CODE_IN_STUB); |
1421 __ push(src_); | 1416 igostub.GenerateCall(masm_, value_, src_); |
1422 GenericBinaryOpStub igostub(op_, overwrite_mode_, SMI_CODE_INLINED); | |
1423 __ CallStub(&igostub); | |
1424 if (!dst_.is(eax)) __ mov(dst_, eax); | 1417 if (!dst_.is(eax)) __ mov(dst_, eax); |
1425 } | 1418 } |
1426 | 1419 |
1427 | 1420 |
1428 // The result of src + value is in dst. It either overflowed or was not | 1421 // The result of src + value is in dst. It either overflowed or was not |
1429 // smi tagged. Undo the speculative addition and call the appropriate | 1422 // smi tagged. Undo the speculative addition and call the appropriate |
1430 // specialized stub for add. The result is left in dst. | 1423 // specialized stub for add. The result is left in dst. |
1431 class DeferredInlineSmiAdd: public DeferredCode { | 1424 class DeferredInlineSmiAdd: public DeferredCode { |
1432 public: | 1425 public: |
1433 DeferredInlineSmiAdd(Register dst, | 1426 DeferredInlineSmiAdd(Register dst, |
1434 Smi* value, | 1427 Smi* value, |
1435 OverwriteMode overwrite_mode) | 1428 OverwriteMode overwrite_mode) |
1436 : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) { | 1429 : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) { |
1437 set_comment("[ DeferredInlineSmiAdd"); | 1430 set_comment("[ DeferredInlineSmiAdd"); |
1438 } | 1431 } |
1439 | 1432 |
1440 virtual void Generate(); | 1433 virtual void Generate(); |
1441 | 1434 |
1442 private: | 1435 private: |
1443 Register dst_; | 1436 Register dst_; |
1444 Smi* value_; | 1437 Smi* value_; |
1445 OverwriteMode overwrite_mode_; | 1438 OverwriteMode overwrite_mode_; |
1446 }; | 1439 }; |
1447 | 1440 |
1448 | 1441 |
1449 void DeferredInlineSmiAdd::Generate() { | 1442 void DeferredInlineSmiAdd::Generate() { |
1450 // Undo the optimistic add operation and call the shared stub. | 1443 // Undo the optimistic add operation and call the shared stub. |
1451 __ sub(Operand(dst_), Immediate(value_)); | 1444 __ sub(Operand(dst_), Immediate(value_)); |
1452 __ push(dst_); | 1445 GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, NO_SMI_CODE_IN_STUB); |
1453 __ push(Immediate(value_)); | 1446 igostub.GenerateCall(masm_, dst_, value_); |
1454 GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, SMI_CODE_INLINED); | |
1455 __ CallStub(&igostub); | |
1456 if (!dst_.is(eax)) __ mov(dst_, eax); | 1447 if (!dst_.is(eax)) __ mov(dst_, eax); |
1457 } | 1448 } |
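For context, this deferred path undoes a speculative inline operation: the fast path adds the constant directly into the destination register and jumps here only when the addition overflowed or the value turned out not to be a smi, which is why Generate() first subtracts the constant back out. A hedged sketch of the inline side, following the deferred-code pattern used throughout this file (register names vary by call site):

    // Inline fast path emitted by ConstantSmiBinaryOperation (sketch):
    __ add(Operand(operand_reg), Immediate(value));  // Speculative smi add.
    deferred->Branch(overflow);                      // Undo the add, call the stub.
    __ test(operand_reg, Immediate(kSmiTagMask));
    deferred->Branch(not_zero);                      // Not a smi: same slow path.
    deferred->BindExit();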
1458 | 1449 |
1459 | 1450 |
1460 // The result of value + src is in dst. It either overflowed or was not | 1451 // The result of value + src is in dst. It either overflowed or was not |
1461 // smi tagged. Undo the speculative addition and call the appropriate | 1452 // smi tagged. Undo the speculative addition and call the appropriate |
1462 // specialized stub for add. The result is left in dst. | 1453 // specialized stub for add. The result is left in dst. |
1463 class DeferredInlineSmiAddReversed: public DeferredCode { | 1454 class DeferredInlineSmiAddReversed: public DeferredCode { |
1464 public: | 1455 public: |
1465 DeferredInlineSmiAddReversed(Register dst, | 1456 DeferredInlineSmiAddReversed(Register dst, |
1466 Smi* value, | 1457 Smi* value, |
1467 OverwriteMode overwrite_mode) | 1458 OverwriteMode overwrite_mode) |
1468 : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) { | 1459 : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) { |
1469 set_comment("[ DeferredInlineSmiAddReversed"); | 1460 set_comment("[ DeferredInlineSmiAddReversed"); |
1470 } | 1461 } |
1471 | 1462 |
1472 virtual void Generate(); | 1463 virtual void Generate(); |
1473 | 1464 |
1474 private: | 1465 private: |
1475 Register dst_; | 1466 Register dst_; |
1476 Smi* value_; | 1467 Smi* value_; |
1477 OverwriteMode overwrite_mode_; | 1468 OverwriteMode overwrite_mode_; |
1478 }; | 1469 }; |
1479 | 1470 |
1480 | 1471 |
1481 void DeferredInlineSmiAddReversed::Generate() { | 1472 void DeferredInlineSmiAddReversed::Generate() { |
1482 // Undo the optimistic add operation and call the shared stub. | 1473 // Undo the optimistic add operation and call the shared stub. |
1483 __ sub(Operand(dst_), Immediate(value_)); | 1474 __ sub(Operand(dst_), Immediate(value_)); |
1484 __ push(Immediate(value_)); | 1475 GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, NO_SMI_CODE_IN_STUB); |
1485 __ push(dst_); | 1476 igostub.GenerateCall(masm_, value_, dst_); |
1486 GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, SMI_CODE_INLINED); | |
1487 __ CallStub(&igostub); | |
1488 if (!dst_.is(eax)) __ mov(dst_, eax); | 1477 if (!dst_.is(eax)) __ mov(dst_, eax); |
1489 } | 1478 } |
1490 | 1479 |
1491 | 1480 |
1492 // The result of src - value is in dst. It either overflowed or was not | 1481 // The result of src - value is in dst. It either overflowed or was not |
1493 // smi tagged. Undo the speculative subtraction and call the | 1482 // smi tagged. Undo the speculative subtraction and call the |
1494 // appropriate specialized stub for subtract. The result is left in | 1483 // appropriate specialized stub for subtract. The result is left in |
1495 // dst. | 1484 // dst. |
1496 class DeferredInlineSmiSub: public DeferredCode { | 1485 class DeferredInlineSmiSub: public DeferredCode { |
1497 public: | 1486 public: |
1498 DeferredInlineSmiSub(Register dst, | 1487 DeferredInlineSmiSub(Register dst, |
1499 Smi* value, | 1488 Smi* value, |
1500 OverwriteMode overwrite_mode) | 1489 OverwriteMode overwrite_mode) |
1501 : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) { | 1490 : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) { |
1502 set_comment("[ DeferredInlineSmiSub"); | 1491 set_comment("[ DeferredInlineSmiSub"); |
1503 } | 1492 } |
1504 | 1493 |
1505 virtual void Generate(); | 1494 virtual void Generate(); |
1506 | 1495 |
1507 private: | 1496 private: |
1508 Register dst_; | 1497 Register dst_; |
1509 Smi* value_; | 1498 Smi* value_; |
1510 OverwriteMode overwrite_mode_; | 1499 OverwriteMode overwrite_mode_; |
1511 }; | 1500 }; |
1512 | 1501 |
1513 | 1502 |
1514 void DeferredInlineSmiSub::Generate() { | 1503 void DeferredInlineSmiSub::Generate() { |
1515 // Undo the optimistic sub operation and call the shared stub. | 1504 // Undo the optimistic sub operation and call the shared stub. |
1516 __ add(Operand(dst_), Immediate(value_)); | 1505 __ add(Operand(dst_), Immediate(value_)); |
1517 __ push(dst_); | 1506 GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, NO_SMI_CODE_IN_STUB); |
1518 __ push(Immediate(value_)); | 1507 igostub.GenerateCall(masm_, dst_, value_); |
1519 GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, SMI_CODE_INLINED); | |
1520 __ CallStub(&igostub); | |
1521 if (!dst_.is(eax)) __ mov(dst_, eax); | 1508 if (!dst_.is(eax)) __ mov(dst_, eax); |
1522 } | 1509 } |
1523 | 1510 |
1524 | 1511 |
1525 void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op, | 1512 void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op, |
1526 Result* operand, | 1513 Result* operand, |
1527 Handle<Object> value, | 1514 Handle<Object> value, |
1528 SmiAnalysis* type, | 1515 SmiAnalysis* type, |
1529 bool reversed, | 1516 bool reversed, |
1530 OverwriteMode overwrite_mode) { | 1517 OverwriteMode overwrite_mode) { |
(...skipping 4985 matching lines...)
6516 // Return 1/0 for true/false in eax. | 6503 // Return 1/0 for true/false in eax. |
6517 __ bind(&true_result); | 6504 __ bind(&true_result); |
6518 __ mov(eax, 1); | 6505 __ mov(eax, 1); |
6519 __ ret(1 * kPointerSize); | 6506 __ ret(1 * kPointerSize); |
6520 __ bind(&false_result); | 6507 __ bind(&false_result); |
6521 __ mov(eax, 0); | 6508 __ mov(eax, 0); |
6522 __ ret(1 * kPointerSize); | 6509 __ ret(1 * kPointerSize); |
6523 } | 6510 } |
6524 | 6511 |
6525 | 6512 |
| 6513 void GenericBinaryOpStub::GenerateCall( |
| 6514 MacroAssembler* masm, |
| 6515 Register left, |
| 6516 Register right) { |
| 6517 if (!ArgsInRegistersSupported()) { |
| 6518 // This stub does not support register arguments; pass them on the stack. |
| 6519 __ push(left); |
| 6520 __ push(right); |
| 6521 } else { |
| 6522 // The calling convention with registers is left in edx and right in eax. |
| 6523 __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1); |
| 6524 if (!(left.is(edx) && right.is(eax))) { |
| 6525 if (left.is(eax) && right.is(edx)) { |
| 6526 if (IsOperationCommutative()) { |
| 6527 SetArgsReversed(); |
| 6528 } else { |
| 6529 __ xchg(left, right); |
| 6530 } |
| 6531 } else if (left.is(edx)) { |
| 6532 __ mov(eax, right); |
| 6533 } else if (left.is(eax)) { |
| 6534 if (IsOperationCommutative()) { |
| 6535 __ mov(edx, right); |
| 6536 SetArgsReversed(); |
| 6537 } else { |
| 6538 __ mov(edx, left); |
| 6539 __ mov(eax, right); |
| 6540 } |
| 6541 } else if (right.is(edx)) { |
| 6542 if (IsOperationCommutative()) { |
| 6543 __ mov(eax, left); |
| 6544 SetArgsReversed(); |
| 6545 } else { |
| 6546 __ mov(eax, right); |
| 6547 __ mov(edx, left); |
| 6548 } |
| 6549 } else if (right.is(eax)) { |
| 6550 __ mov(edx, left); |
| 6551 } else { |
| 6552 __ mov(edx, left); |
| 6553 __ mov(eax, right); |
| 6554 } |
| 6555 } |
| 6556 |
| 6557 // Update flags to indicate that arguments are in registers. |
| 6558 SetArgsInRegisters(); |
| 6559 } |
| 6560 |
| 6561 // Call the stub. |
| 6562 __ CallStub(this); |
| 6563 } |
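A worked example of the shuffle above, under the convention this patch establishes (left in edx, right in eax): suppose a hypothetical call site has left in edi and right already in eax, with a commutative op.

    GenericBinaryOpStub stub(Token::ADD, NO_OVERWRITE, NO_SMI_CODE_IN_STUB);
    stub.GenerateCall(masm, edi, eax);
    // Falls through to the right.is(eax) case and emits only: mov edx, edi.
    // Had the operands arrived swapped (left in eax, right in edx), the
    // commutative case would call SetArgsReversed() rather than emit an xchg.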
| 6564 |
| 6565 |
| 6566 void GenericBinaryOpStub::GenerateCall( |
| 6567 MacroAssembler* masm, |
| 6568 Register left, |
| 6569 Smi* right) { |
| 6570 if (!ArgsInRegistersSupported()) { |
| 6571 // This stub does not support register arguments; pass them on the stack. |
| 6572 __ push(left); |
| 6573 __ push(Immediate(right)); |
| 6574 } else { |
| 6575 // Adapt arguments to the calling convention: left in edx, right in eax. |
| 6576 if (left.is(edx)) { |
| 6577 __ mov(eax, Immediate(right)); |
| 6578 } else if (left.is(eax) && IsOperationCommutative()) { |
| 6579 __ mov(edx, Immediate(right)); |
| 6580 SetArgsReversed(); |
| 6581 } else { |
| 6582 __ mov(edx, left); |
| 6583 __ mov(eax, Immediate(right)); |
| 6584 } |
| 6585 |
| 6586 // Update flags to indicate that arguments are in registers. |
| 6587 SetArgsInRegisters(); |
| 6588 } |
| 6589 |
| 6590 // Call the stub. |
| 6591 __ CallStub(this); |
| 6592 } |
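Similarly for the Register/Smi overload: when the register operand is already where the convention wants it, only the immediate move is emitted. A small usage sketch (the call site is hypothetical):

    GenericBinaryOpStub stub(Token::SUB, OVERWRITE_LEFT, NO_SMI_CODE_IN_STUB);
    stub.GenerateCall(masm, edx, Smi::FromInt(1));
    // left is already in edx, so this emits only: mov eax, Immediate(Smi::FromInt(1)).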
| 6593 |
| 6594 |
| 6595 void GenericBinaryOpStub::GenerateCall( |
| 6596 MacroAssembler* masm, |
| 6597 Smi* left, |
| 6598 Register right) { |
| 6599 if (!ArgsInRegistersSupported()) { |
| 6600 // This stub does not support register arguments; pass them on the stack. |
| 6601 __ push(Immediate(left)); |
| 6602 __ push(right); |
| 6603 } else { |
| 6604 // Adapt arguments to the calling convention: left in edx, right in eax. |
| 6605 bool is_commutative = IsOperationCommutative(); |
| 6606 if (right.is(eax)) { |
| 6607 __ mov(edx, Immediate(left)); |
| 6608 } else if (right.is(edx) && is_commutative) { |
| 6609 __ mov(eax, Immediate(left)); SetArgsReversed(); |
| 6610 } else { |
| 6611 __ mov(edx, Immediate(left)); |
| 6612 __ mov(eax, right); |
| 6613 } |
| 6614 // Update flags to indicate that arguments are in registers. |
| 6615 SetArgsInRegisters(); |
| 6616 } |
| 6617 |
| 6618 // Call the stub. |
| 6619 __ CallStub(this); |
| 6620 } |
| 6621 |
| 6622 |
6526 void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) { | 6623 void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) { |
6527 // Perform fast-case smi code for the operation (eax <op> ebx) and | 6624 // Perform fast-case smi code for the operation (eax <op> ebx) and |
6528 // leave result in register eax. | 6625 // leave result in register eax. |
6529 | 6626 |
6530 // Prepare the smi check of both operands by or'ing them together | 6627 // Prepare the smi check of both operands by or'ing them together |
6531 // before checking against the smi mask. | 6628 // before checking against the smi mask. |
6532 __ mov(ecx, Operand(ebx)); | 6629 __ mov(ecx, Operand(ebx)); |
6533 __ or_(ecx, Operand(eax)); | 6630 __ or_(ecx, Operand(eax)); |
6534 | 6631 |
6535 switch (op_) { | 6632 switch (op_) { |
(...skipping 127 matching lines...)
6663 default: | 6760 default: |
6664 UNREACHABLE(); | 6761 UNREACHABLE(); |
6665 break; | 6762 break; |
6666 } | 6763 } |
6667 } | 6764 } |
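The or'ing at the top of GenerateSmiCode (new lines 6629-6630) checks both operands with a single test: on ia32 a smi is a 31-bit integer shifted left once, so its low bit is clear, while heap object pointers carry a set tag bit. The or of the two words therefore has a clear low bit only if both operands are smis. A worked example with the tag layout assumed from the surrounding code:

    //   smi 5        -> 0x0000000a  (5 << kSmiTagSize, low bit clear)
    //   smi -1       -> 0xfffffffe  (low bit clear)
    //   heap object  -> 0x08041231  (hypothetical pointer, low tag bit set)
    // (0x0000000a | 0xfffffffe) & kSmiTagMask == 0  -> both smis, stay inline
    // (0x0000000a | 0x08041231) & kSmiTagMask != 0  -> jump to the slow label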
6668 | 6765 |
6669 | 6766 |
6670 void GenericBinaryOpStub::Generate(MacroAssembler* masm) { | 6767 void GenericBinaryOpStub::Generate(MacroAssembler* masm) { |
6671 Label call_runtime; | 6768 Label call_runtime; |
6672 | 6769 |
6673 if (flags_ == SMI_CODE_IN_STUB) { | 6770 __ IncrementCounter(&Counters::generic_binary_stub_calls, 1); |
6674 // The fast case smi code wasn't inlined in the stub caller | 6771 |
6675 // code. Generate it here to speed up common operations. | 6772 // Generate fast case smi code if requested, i.e. when the caller has not |
| 6773 // generated the fast case smi code inline. Generating it here speeds up |
| 6774 // common operations. |
| 6775 if (HasSmiCodeInStub()) { |
6676 Label slow; | 6776 Label slow; |
6677 __ mov(ebx, Operand(esp, 1 * kPointerSize)); // get y | 6777 __ mov(ebx, Operand(esp, 1 * kPointerSize)); |
6678 __ mov(eax, Operand(esp, 2 * kPointerSize)); // get x | 6778 __ mov(eax, Operand(esp, 2 * kPointerSize)); |
6679 GenerateSmiCode(masm, &slow); | 6779 GenerateSmiCode(masm, &slow); |
6680 __ ret(2 * kPointerSize); // remove both operands | 6780 GenerateReturn(masm); |
6681 | |
6682 // Too bad. The fast case smi code didn't succeed. | 6781 // Too bad. The fast case smi code didn't succeed. |
6683 __ bind(&slow); | 6782 __ bind(&slow); |
6684 } | 6783 } |
6685 | 6784 |
6686 // Setup registers. | 6785 // Make sure the arguments are in edx and eax. |
6687 __ mov(eax, Operand(esp, 1 * kPointerSize)); // get y | 6786 GenerateLoadArguments(masm); |
6688 __ mov(edx, Operand(esp, 2 * kPointerSize)); // get x | |
6689 | 6787 |
6690 // Floating point case. | 6788 // Floating point case. |
6691 switch (op_) { | 6789 switch (op_) { |
6692 case Token::ADD: | 6790 case Token::ADD: |
6693 case Token::SUB: | 6791 case Token::SUB: |
6694 case Token::MUL: | 6792 case Token::MUL: |
6695 case Token::DIV: { | 6793 case Token::DIV: { |
6696 // eax: y | 6794 // eax: y |
6697 // edx: x | 6795 // edx: x |
6698 | 6796 |
(...skipping 13 matching lines...)
6712 switch (mode_) { | 6810 switch (mode_) { |
6713 case OVERWRITE_LEFT: | 6811 case OVERWRITE_LEFT: |
6714 __ mov(eax, Operand(edx)); | 6812 __ mov(eax, Operand(edx)); |
6715 // Fall through! | 6813 // Fall through! |
6716 case OVERWRITE_RIGHT: | 6814 case OVERWRITE_RIGHT: |
6717 // If the argument in eax is already an object, we skip the | 6815 // If the argument in eax is already an object, we skip the |
6718 // allocation of a heap number. | 6816 // allocation of a heap number. |
6719 __ test(eax, Immediate(kSmiTagMask)); | 6817 __ test(eax, Immediate(kSmiTagMask)); |
6720 __ j(not_zero, &skip_allocation, not_taken); | 6818 __ j(not_zero, &skip_allocation, not_taken); |
6721 // Fall through! | 6819 // Fall through! |
6722 case NO_OVERWRITE: | 6820 case NO_OVERWRITE: { |
| 6821 // Allocate a heap number for the result. Keep eax and edx intact |
| 6822 // for the possible runtime call. |
6723 FloatingPointHelper::AllocateHeapNumber(masm, | 6823 FloatingPointHelper::AllocateHeapNumber(masm, |
6724 &call_runtime, | 6824 &call_runtime, |
6725 ecx, | 6825 ecx, |
6726 edx, | 6826 no_reg, |
6727 eax); | 6827 ebx); |
| 6828 // Now eax can be overwritten, losing one of the arguments, as we |
| 6829 // are done with it and will not need it any more. |
| 6830 __ mov(eax, ebx); |
6728 __ bind(&skip_allocation); | 6831 __ bind(&skip_allocation); |
6729 break; | 6832 break; |
| 6833 } |
6730 default: UNREACHABLE(); | 6834 default: UNREACHABLE(); |
6731 } | 6835 } |
6732 __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0); | 6836 __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0); |
6733 __ ret(2 * kPointerSize); | 6837 GenerateReturn(masm); |
6734 | |
6735 } else { // SSE2 not available, use FPU. | 6838 } else { // SSE2 not available, use FPU. |
6736 FloatingPointHelper::CheckFloatOperands(masm, &call_runtime, ebx); | 6839 FloatingPointHelper::CheckFloatOperands(masm, &call_runtime, ebx); |
6737 // Allocate a heap number, if needed. | 6840 // Allocate a heap number, if needed. |
6738 Label skip_allocation; | 6841 Label skip_allocation; |
6739 switch (mode_) { | 6842 switch (mode_) { |
6740 case OVERWRITE_LEFT: | 6843 case OVERWRITE_LEFT: |
6741 __ mov(eax, Operand(edx)); | 6844 __ mov(eax, Operand(edx)); |
6742 // Fall through! | 6845 // Fall through! |
6743 case OVERWRITE_RIGHT: | 6846 case OVERWRITE_RIGHT: |
6744 // If the argument in eax is already an object, we skip the | 6847 // If the argument in eax is already an object, we skip the |
6745 // allocation of a heap number. | 6848 // allocation of a heap number. |
6746 __ test(eax, Immediate(kSmiTagMask)); | 6849 __ test(eax, Immediate(kSmiTagMask)); |
6747 __ j(not_zero, &skip_allocation, not_taken); | 6850 __ j(not_zero, &skip_allocation, not_taken); |
6748 // Fall through! | 6851 // Fall through! |
6749 case NO_OVERWRITE: | 6852 case NO_OVERWRITE: |
| 6853 // Allocate a heap number for the result. Keep eax and edx intact |
| 6854 // for the possible runtime call. |
6750 FloatingPointHelper::AllocateHeapNumber(masm, | 6855 FloatingPointHelper::AllocateHeapNumber(masm, |
6751 &call_runtime, | 6856 &call_runtime, |
6752 ecx, | 6857 ecx, |
6753 edx, | 6858 no_reg, |
6754 eax); | 6859 ebx); |
| 6860 // Now eax can be overwritten, losing one of the arguments, as we |
| 6861 // are done with it and will not need it any more. |
| 6862 __ mov(eax, ebx); |
6755 __ bind(&skip_allocation); | 6863 __ bind(&skip_allocation); |
6756 break; | 6864 break; |
6757 default: UNREACHABLE(); | 6865 default: UNREACHABLE(); |
6758 } | 6866 } |
6759 FloatingPointHelper::LoadFloatOperands(masm, ecx); | 6867 FloatingPointHelper::LoadFloatOperands(masm, ecx); |
6760 | 6868 |
6761 switch (op_) { | 6869 switch (op_) { |
6762 case Token::ADD: __ faddp(1); break; | 6870 case Token::ADD: __ faddp(1); break; |
6763 case Token::SUB: __ fsubp(1); break; | 6871 case Token::SUB: __ fsubp(1); break; |
6764 case Token::MUL: __ fmulp(1); break; | 6872 case Token::MUL: __ fmulp(1); break; |
6765 case Token::DIV: __ fdivp(1); break; | 6873 case Token::DIV: __ fdivp(1); break; |
6766 default: UNREACHABLE(); | 6874 default: UNREACHABLE(); |
6767 } | 6875 } |
6768 __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset)); | 6876 __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset)); |
6769 __ ret(2 * kPointerSize); | 6877 GenerateReturn(masm); |
6770 } | 6878 } |
6771 } | 6879 } |
6772 case Token::MOD: { | 6880 case Token::MOD: { |
6773 // For MOD we go directly to runtime in the non-smi case. | 6881 // For MOD we go directly to runtime in the non-smi case. |
6774 break; | 6882 break; |
6775 } | 6883 } |
6776 case Token::BIT_OR: | 6884 case Token::BIT_OR: |
6777 case Token::BIT_AND: | 6885 case Token::BIT_AND: |
6778 case Token::BIT_XOR: | 6886 case Token::BIT_XOR: |
6779 case Token::SAR: | 6887 case Token::SAR: |
(...skipping 112 matching lines...)
6892 __ bind(&non_smi_result); | 7000 __ bind(&non_smi_result); |
6893 } | 7001 } |
6894 __ mov(eax, Operand(esp, 1 * kPointerSize)); | 7002 __ mov(eax, Operand(esp, 1 * kPointerSize)); |
6895 __ mov(edx, Operand(esp, 2 * kPointerSize)); | 7003 __ mov(edx, Operand(esp, 2 * kPointerSize)); |
6896 break; | 7004 break; |
6897 } | 7005 } |
6898 default: UNREACHABLE(); break; | 7006 default: UNREACHABLE(); break; |
6899 } | 7007 } |
6900 | 7008 |
6901 // If all else fails, use the runtime system to get the correct | 7009 // If all else fails, use the runtime system to get the correct |
6902 // result. | 7010 // result. If the arguments were passed in registers, place them on |
| 7011 // the stack now in the correct order. |
6903 __ bind(&call_runtime); | 7012 __ bind(&call_runtime); |
| 7013 if (HasArgumentsInRegisters()) { |
| 7014 __ pop(ecx); |
| 7015 if (HasArgumentsReversed()) { |
| 7016 __ push(eax); |
| 7017 __ push(edx); |
| 7018 } else { |
| 7019 __ push(edx); |
| 7020 __ push(eax); |
| 7021 } |
| 7022 __ push(ecx); |
| 7023 } |
6904 switch (op_) { | 7024 switch (op_) { |
6905 case Token::ADD: { | 7025 case Token::ADD: { |
6906 // Test for string arguments before calling runtime. | 7026 // Test for string arguments before calling runtime. |
6907 Label not_strings, both_strings, not_string1, string1; | 7027 Label not_strings, both_strings, not_string1, string1; |
6908 Result answer; | 7028 Result answer; |
6909 __ mov(eax, Operand(esp, 2 * kPointerSize)); // First argument. | 7029 __ mov(eax, Operand(esp, 2 * kPointerSize)); // First argument. |
6910 __ mov(edx, Operand(esp, 1 * kPointerSize)); // Second argument. | 7030 __ mov(edx, Operand(esp, 1 * kPointerSize)); // Second argument. |
6911 __ test(eax, Immediate(kSmiTagMask)); | 7031 __ test(eax, Immediate(kSmiTagMask)); |
6912 __ j(zero, ¬_string1); | 7032 __ j(zero, ¬_string1); |
6913 __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, eax); | 7033 __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, eax); |
(...skipping 56 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
6970 break; | 7090 break; |
6971 case Token::SHR: | 7091 case Token::SHR: |
6972 __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION); | 7092 __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION); |
6973 break; | 7093 break; |
6974 default: | 7094 default: |
6975 UNREACHABLE(); | 7095 UNREACHABLE(); |
6976 } | 7096 } |
6977 } | 7097 } |
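The re-push block at new lines 7013-7023 has to step around the return address: with register arguments the stack holds nothing but the return address, so it is popped into ecx, the operands are pushed in calling order (swapped back if SetArgsReversed() was used), and the return address is pushed on top again. A sketch of the stack transition for the non-reversed case:

    // Before:                      After:
    //   [esp]   return address       [esp]    return address
    //                                [esp+4]  right (from eax)
    //                                [esp+8]  left  (from edx)
    // The string checks and builtin calls that follow can then use the plain
    // stack convention unchanged.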
6978 | 7098 |
6979 | 7099 |
| 7100 void GenericBinaryOpStub::GenerateLoadArguments(MacroAssembler* masm) { |
| 7101 // If the arguments are not passed in registers, read them from the stack. |
| 7102 if (!HasArgumentsInRegisters()) { |
| 7103 __ mov(eax, Operand(esp, 1 * kPointerSize)); |
| 7104 __ mov(edx, Operand(esp, 2 * kPointerSize)); |
| 7105 } |
| 7106 } |
| 7107 |
| 7108 |
| 7109 void GenericBinaryOpStub::GenerateReturn(MacroAssembler* masm) { |
| 7110 // If the arguments are not passed in registers, remove them from the |
| 7111 // stack before returning. |
| 7112 if (!HasArgumentsInRegisters()) { |
| 7113 __ ret(2 * kPointerSize); // Remove both operands |
| 7114 } else { |
| 7115 __ ret(0); |
| 7116 } |
| 7117 } |
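The asymmetry in GenerateReturn follows from the semantics of ret with an immediate on ia32: ret n pops the return address and then discards n further bytes of stack, which matches the two pushed operands under the stack convention and nothing under the register convention.

    // Stack convention: the caller pushed both operands.
    //   __ ret(2 * kPointerSize);  // Pop return address, drop left and right.
    // Register convention: no operands of ours are on the stack.
    //   __ ret(0);                 // Pop only the return address.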
| 7118 |
| 7119 |
6980 void FloatingPointHelper::AllocateHeapNumber(MacroAssembler* masm, | 7120 void FloatingPointHelper::AllocateHeapNumber(MacroAssembler* masm, |
6981 Label* need_gc, | 7121 Label* need_gc, |
6982 Register scratch1, | 7122 Register scratch1, |
6983 Register scratch2, | 7123 Register scratch2, |
6984 Register result) { | 7124 Register result) { |
6985 // Allocate heap number in new space. | 7125 // Allocate heap number in new space. |
6986 __ AllocateInNewSpace(HeapNumber::kSize, | 7126 __ AllocateInNewSpace(HeapNumber::kSize, |
6987 result, | 7127 result, |
6988 scratch1, | 7128 scratch1, |
6989 scratch2, | 7129 scratch2, |
(...skipping 980 matching lines...)
7970 | 8110 |
7971 int CompareStub::MinorKey() { | 8111 int CompareStub::MinorKey() { |
7972 // Encode the two parameters in a unique 16 bit value. | 8112 // Encode the two parameters in a unique 16 bit value. |
7973 ASSERT(static_cast<unsigned>(cc_) < (1 << 15)); | 8113 ASSERT(static_cast<unsigned>(cc_) < (1 << 15)); |
7974 return (static_cast<unsigned>(cc_) << 1) | (strict_ ? 1 : 0); | 8114 return (static_cast<unsigned>(cc_) << 1) | (strict_ ? 1 : 0); |
7975 } | 8115 } |
7976 | 8116 |
7977 #undef __ | 8117 #undef __ |
7978 | 8118 |
7979 } } // namespace v8::internal | 8119 } } // namespace v8::internal |